Dataset schema: code (string, 82 to 53.2k chars) | code_codestyle (int64, 0 to 721) | style_context (string, 91 to 41.9k chars) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1)
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b) -> bool:
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name) -> None:
    # Replace every occurrence of `name` in the node's inputs with `new_name`,
    # recursing into the subgraphs of If/Loop control-flow nodes.
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name) -> None:
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace) -> None:
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        # Remove the duplicate initializer and redirect every node that used it
        # to the retained (reference) initializer.
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Remove duplicated initializers from an ONNX model and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type:", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size:", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
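
# Usage sketch (illustrative; the model path is a placeholder):
if __name__ == "__main__":
    optimized_path = remove_dup_initializers("model.onnx")
    print("Optimized model written to:", optimized_path)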
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    # Pairwise squared distances between the rows of `a` and the rows of `b`.
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    # Map every pixel to the index of its nearest color cluster.
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Image processor for ImageGPT: resizes images, normalizes them to [-1, 1], and
    optionally color-quantizes pixels into cluster indices ("color tokens").
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Rescale pixel values from [0, 255] to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
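
# Usage sketch (illustrative; the checkpoint name is an assumption). After color
# quantization the processor returns cluster indices as `input_ids`, not `pixel_values`.
if __name__ == "__main__":
    from PIL import Image
    from transformers import ImageGPTImageProcessor

    processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
    image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
    encoding = processor(images=image, return_tensors="np")
    print(encoding["input_ids"].shape)  # (1, height * width) after flattening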
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
a_ = {
"n_samples": 6_4,
"horizon": 3_2,
"num_inference_steps": 2_0,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
a_ = "hopper-medium-v2"
a_ = gym.make(env_name)
a_ = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
a_ = env.reset()
a_ = 0
a_ = 0
a_ = 1_0_0_0
a_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
a_ = pipeline(obs, planning_horizon=3_2)
# execute action in environment
a_ = env.step(denorm_actions)
a_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
a_ = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
"""simple docstring"""
a_ = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
a_ = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
a_ = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
a_ = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
a_ = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
a_ = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
a_ = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
a_ = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
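
# A minimal sanity check (illustrative, not part of the original file): every
# schedule should be strictly decreasing and terminate at timestep 0.
if __name__ == "__main__":
    all_schedules = [
        fast27_timesteps, smart27_timesteps, smart50_timesteps, smart100_timesteps,
        smart185_timesteps, super27_timesteps, super40_timesteps, super100_timesteps,
    ]
    for schedule in all_schedules:
        assert all(a > b for a, b in zip(schedule, schedule[1:]))
        assert schedule[-1] == 0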
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase = logging.get_logger(__name__)
class lowercase__ ( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = "linear"
UpperCamelCase = "cosine"
UpperCamelCase = "cosine_with_restarts"
UpperCamelCase = "polynomial"
UpperCamelCase = "constant"
UpperCamelCase = "constant_with_warmup"
UpperCamelCase = "piecewise_constant"
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = -1 ):
return LambdaLR(__UpperCAmelCase , lambda lowerCAmelCase__ : 1 , last_epoch=__UpperCAmelCase )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = -1 ):
def lr_lambda(lowerCAmelCase__ ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) )
return 1.0
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = -1 ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = step_rules.split("," )
for rule_str in rule_list[:-1]:
UpperCAmelCase_ = rule_str.split(":" )
UpperCAmelCase_ = int(__UpperCAmelCase )
UpperCAmelCase_ = float(__UpperCAmelCase )
UpperCAmelCase_ = value
UpperCAmelCase_ = float(rule_list[-1] )
def create_rules_function(lowerCAmelCase__ , lowerCAmelCase__ ):
def rule_func(lowerCAmelCase__ ) -> float:
UpperCAmelCase_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCAmelCase_ = create_rules_function(__UpperCAmelCase , __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=-1 ):
def lr_lambda(lowerCAmelCase__ ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0.5 , lowerCAmelCase__ = -1 ):
def lr_lambda(lowerCAmelCase__ ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 , lowerCAmelCase__ = -1 ):
def lr_lambda(lowerCAmelCase__ ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1e-7 , lowerCAmelCase__=1.0 , lowerCAmelCase__=-1 ):
UpperCAmelCase_ = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(lowerCAmelCase__ ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase_ = lr_init - lr_end
UpperCAmelCase_ = num_training_steps - num_warmup_steps
UpperCAmelCase_ = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = -1 , ):
UpperCAmelCase_ = SchedulerType(__UpperCAmelCase )
UpperCAmelCase_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
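
# Usage sketch (illustrative; assumes this module is importable, since it uses a
# relative import): drive any torch optimizer with a named schedule.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        optimizer.step()
        lr_scheduler.step()
    print(lr_scheduler.get_last_lr())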
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = "timesformer"
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=8 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase="divided_space_time" , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Optional[int] = image_size
lowercase__: Optional[Any] = patch_size
lowercase__: Dict = num_channels
lowercase__: Tuple = num_frames
lowercase__: Any = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: int = num_attention_heads
lowercase__: Optional[int] = intermediate_size
lowercase__: Optional[int] = hidden_act
lowercase__: int = hidden_dropout_prob
lowercase__: Tuple = attention_probs_dropout_prob
lowercase__: Union[str, Any] = initializer_range
lowercase__: List[Any] = layer_norm_eps
lowercase__: str = qkv_bias
lowercase__: Tuple = attention_type
lowercase__: Tuple = drop_path_rate
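
# Usage sketch (illustrative; assumes the installed `transformers` package, since
# this module uses relative imports):
if __name__ == "__main__":
    from transformers import TimesformerConfig, TimesformerModel

    configuration = TimesformerConfig(num_frames=16)
    model = TimesformerModel(configuration)
    print(model.config.attention_type)  # divided_space_time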
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_a : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["pixel_values"]
def __init__( self , a__ = True , a__ = None , a__ = PILImageResampling.BICUBIC , a__ = True , a__ = None , a__ = True , a__ = 1 / 255 , a__ = True , a__ = None , a__ = None , a__ = True , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : List[str] = size if size is not None else {"""shortest_edge""": 224}
_lowerCAmelCase : Union[str, Any] = get_size_dict(a__ , default_to_square=a__ )
_lowerCAmelCase : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCAmelCase : Optional[Any] = get_size_dict(a__ , default_to_square=a__ , param_name="""crop_size""" )
_lowerCAmelCase : str = do_resize
_lowerCAmelCase : Tuple = size
_lowerCAmelCase : Dict = resample
_lowerCAmelCase : Any = do_center_crop
_lowerCAmelCase : Tuple = crop_size
_lowerCAmelCase : List[Any] = do_rescale
_lowerCAmelCase : Optional[Any] = rescale_factor
_lowerCAmelCase : Optional[int] = do_normalize
_lowerCAmelCase : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCAmelCase : Any = image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCAmelCase : List[str] = do_convert_rgb
def __A ( self , a__ , a__ , a__ = PILImageResampling.BICUBIC , a__ = None , **a__ , ):
_lowerCAmelCase : Optional[int] = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
_lowerCAmelCase : str = get_resize_output_image_size(a__ , size=size["""shortest_edge"""] , default_to_square=a__ )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
_lowerCAmelCase : Optional[Any] = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(a__ , size=(size["""height"""], size["""width"""]) , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ , a__ = None , **a__ , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_lowerCAmelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : str = size if size is not None else self.size
_lowerCAmelCase : Dict = get_size_dict(a__ , param_name="""size""" , default_to_square=a__ )
_lowerCAmelCase : Tuple = resample if resample is not None else self.resample
_lowerCAmelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Any = get_size_dict(a__ , param_name="""crop_size""" , default_to_square=a__ )
_lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCAmelCase : Any = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCAmelCase : List[Any] = [convert_to_rgb(a__ ) for image in images]
# All transformations expect numpy arrays.
_lowerCAmelCase : List[str] = [to_numpy_array(a__ ) for image in images]
if do_resize:
_lowerCAmelCase : Dict = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
if do_center_crop:
_lowerCAmelCase : str = [self.center_crop(image=a__ , size=a__ ) for image in images]
if do_rescale:
_lowerCAmelCase : Optional[Any] = [self.rescale(image=a__ , scale=a__ ) for image in images]
if do_normalize:
_lowerCAmelCase : Union[str, Any] = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images]
_lowerCAmelCase : Optional[int] = [to_channel_dimension_format(a__ , a__ ) for image in images]
_lowerCAmelCase : Dict = {"""pixel_values""": images}
return BatchFeature(data=a__ , tensor_type=a__ )
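
# Usage sketch (illustrative; the checkpoint name is an assumption):
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPImageProcessor

    processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    inputs = processor(images=image, return_tensors="np")
    print(inputs["pixel_values"].shape)  # (1, 3, 224, 224)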
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    r"""Wraps a ViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
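
# Usage sketch (illustrative; the checkpoint name is an assumption):
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.fromarray(np.zeros((352, 352, 3), dtype=np.uint8))
    inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']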
def abbr(a: str, b: str) -> bool:
    """
    Check whether string `a` can be abbreviated to string `b` by uppercasing some
    of its lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j] is True if a[:i] can be transformed into b[:j]
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : Dict = torch.nn.Linear(10 , 10 )
lowercase__ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
lowercase__ : int = Accelerator()
lowercase__ : Dict = accelerator.prepare(lowerCamelCase__ )
try:
pickle.loads(pickle.dumps(lowerCamelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__snake_case = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ):
for attribute in key.split(""".""" ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowercase__ : List[Any] = """lm_head"""
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
lowercase__ : Any = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase__ : Dict = value
elif weight_type == "weight_g":
lowercase__ : Union[str, Any] = value
elif weight_type == "weight_v":
lowercase__ : str = value
elif weight_type == "bias":
lowercase__ : int = value
else:
lowercase__ : Optional[Any] = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[str] ):
lowercase__ : Tuple = []
lowercase__ : Dict = fairseq_model.state_dict()
lowercase__ : Optional[int] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : str = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
lowercase__ : int = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[str] = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowercase__ : Dict = True
if "*" in mapped_key:
lowercase__ : List[str] = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
lowercase__ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
lowercase__ : Any = """weight_g"""
elif "weight_v" in name:
lowercase__ : Any = """weight_v"""
elif "bias" in name:
lowercase__ : List[str] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : str = """weight"""
else:
lowercase__ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowerCamelCase ( lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] ):
lowercase__ : Dict = full_name.split("""conv_layers.""" )[-1]
lowercase__ : Union[str, Any] = name.split(""".""" )
lowercase__ : List[Any] = int(items[0] )
lowercase__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase__ : Optional[int] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase__ : int = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase__ : Tuple = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase__ : Any = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def _lowerCamelCase ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : List[Any]=True ):
if config_path is not None:
lowercase__ : int = UniSpeechConfig.from_pretrained(lowerCamelCase__ )
else:
lowercase__ : Tuple = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowercase__ : Any = Dictionary.load_from_json(lowerCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ : int = target_dict.pad_index
lowercase__ : Tuple = target_dict.bos_index
lowercase__ : Dict = target_dict.eos_index
lowercase__ : Dict = len(target_dict.symbols )
lowercase__ : List[Any] = os.path.join(lowerCamelCase__ , """vocab.json""" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
lowercase__ : Any = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ : Any = 42
lowercase__ : List[str] = 43
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : int = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase__ , )
lowercase__ : List[str] = True if config.feat_extract_norm == """layer""" else False
lowercase__ : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
lowercase__ : Any = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
lowercase__ : Any = UniSpeechForCTC(lowerCamelCase__ )
else:
lowercase__ : Dict = UniSpeechForPreTraining(lowerCamelCase__ )
if is_finetuned:
lowercase__ , lowercase__ , lowercase__ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} )
else:
lowercase__ , lowercase__ , lowercase__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowercase__ : str = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
hf_unispeech.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__snake_case = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
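
# Programmatic usage sketch (illustrative; every path below is a placeholder):
#
#     convert_unispeech_checkpoint(
#         checkpoint_path="/path/to/unispeech_checkpoint.pt",
#         pytorch_dump_folder_path="/path/to/output_dir",
#         dict_path="/path/to/fairseq_dict",
#         is_finetuned=True,
#     )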
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Tuple , lowercase__ : TransformeraDModel , lowercase__ : AutoencoderKL , lowercase__ : KarrasDiffusionSchedulers , lowercase__ : Optional[Dict[int, str]] = None , ):
super().__init__()
self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__ )
# create a imagenet -> id dictionary for easier use
__lowercase : int = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
__lowercase : List[str] = int(lowercase__ )
__lowercase : str = dict(sorted(self.labels.items() ) )
def snake_case ( self : str , lowercase__ : Union[str, List[str]] ):
if not isinstance(lowercase__ , lowercase__ ):
__lowercase : Any = list(lowercase__ )
for l in label:
if l not in self.labels:
raise ValueError(
f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : List[str] , lowercase__ : List[int] , lowercase__ : float = 4.0 , lowercase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase__ : int = 5_0 , lowercase__ : Optional[str] = "pil" , lowercase__ : bool = True , ):
__lowercase : Dict = len(lowercase__ )
__lowercase : Tuple = self.transformer.config.sample_size
__lowercase : str = self.transformer.config.in_channels
__lowercase : int = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__lowercase : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__lowercase : str = torch.tensor(lowercase__ , device=self.device ).reshape(-1 )
__lowercase : Optional[int] = torch.tensor([1_0_0_0] * batch_size , device=self.device )
__lowercase : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__lowercase : List[Any] = latent_model_input[: len(lowercase__ ) // 2]
__lowercase : Optional[Any] = torch.cat([half, half] , dim=0 )
__lowercase : str = self.scheduler.scale_model_input(lowercase__ , lowercase__ )
__lowercase : List[str] = t
if not torch.is_tensor(lowercase__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__lowercase : List[str] = latent_model_input.device.type == "mps"
if isinstance(lowercase__ , lowercase__ ):
__lowercase : Optional[int] = torch.floataa if is_mps else torch.floataa
else:
__lowercase : Dict = torch.intaa if is_mps else torch.intaa
__lowercase : List[Any] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__lowercase : str = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowercase : List[Any] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__lowercase : Optional[Any] = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__ ).sample
# perform guidance
if guidance_scale > 1:
__lowercase ,__lowercase : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__lowercase ,__lowercase : Optional[int] = torch.split(lowercase__ , len(lowercase__ ) // 2 , dim=0 )
__lowercase : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__lowercase : str = torch.cat([half_eps, half_eps] , dim=0 )
__lowercase : Optional[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__lowercase ,__lowercase : int = torch.split(lowercase__ , lowercase__ , dim=1 )
else:
__lowercase : Optional[Any] = noise_pred
# compute previous image: x_t -> x_t-1
__lowercase : Optional[Any] = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
if guidance_scale > 1:
__lowercase ,__lowercase : Tuple = latent_model_input.chunk(2 , dim=0 )
else:
__lowercase : Tuple = latent_model_input
__lowercase : Any = 1 / self.vae.config.scaling_factor * latents
__lowercase : Optional[int] = self.vae.decode(lowercase__ ).sample
__lowercase : str = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowercase : List[str] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase : int = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__ )
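
# Usage sketch (illustrative; "facebook/DiT-XL-2-256" is the public DiT checkpoint,
# assumed here rather than taken from this file, and a CUDA device is assumed):
if __name__ == "__main__":
    from diffusers import DiTPipeline

    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
    class_ids = pipe.get_label_ids(["golden retriever"])
    image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
    image.save("dit_sample.png")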
"""simple docstring"""
import random
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = False ) ->dict:
"""simple docstring"""
__lowercase : dict = {i: [] for i in range(_lowerCamelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_lowerCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(_lowerCamelCase ):
for j in range(i + 1, _lowerCamelCase ):
if random.random() < probability:
graph[i].append(_lowerCamelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(_lowerCamelCase )
return graph
def snake_case__ ( _lowerCamelCase ) ->dict:
"""simple docstring"""
return {
i: [j for j in range(_lowerCamelCase ) if i != j] for i in range(_lowerCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
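
# A minimal sanity check (illustrative, not part of the original module):
if __name__ == "__main__":
    g = random_graph(5, 0.5)
    assert set(g) == {0, 1, 2, 3, 4}
    # in an undirected graph every edge is stored in both adjacency lists
    assert all(v in g[u] for v in g for u in g[v])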
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
UpperCAmelCase_ = True
except ImportError:
UpperCAmelCase_ = False
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase ( A__ ) -> List[str]:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
@staticmethod
def __lowerCamelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : Optional[Any] = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=SCREAMING_SNAKE_CASE__ , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=SCREAMING_SNAKE_CASE__ , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , *SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : Optional[int] = testing
_snake_case : Tuple = testing_file
_snake_case : List[str] = path
def __lowerCamelCase( self ):
"""simple docstring"""
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
_snake_case : str = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
_snake_case : Dict = (
Path(SCREAMING_SNAKE_CASE__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
_snake_case : str = path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
if not self._testing:
cookiecutter(str(SCREAMING_SNAKE_CASE__ ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
_snake_case : List[str] = json.load(SCREAMING_SNAKE_CASE__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=SCREAMING_SNAKE_CASE__ , extra_context=SCREAMING_SNAKE_CASE__ , )
_snake_case : Tuple = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
_snake_case : Tuple = json.load(SCREAMING_SNAKE_CASE__ )
_snake_case : List[str] = configuration["""lowercase_modelname"""]
_snake_case : str = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(f'''{directory}/configuration.json''' )
_snake_case : Dict = """PyTorch""" in generate_tensorflow_pytorch_and_flax
_snake_case : Any = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
_snake_case : Union[str, Any] = """Flax""" in generate_tensorflow_pytorch_and_flax
_snake_case : Tuple = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=SCREAMING_SNAKE_CASE__ )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , """r""" ) as f:
_snake_case : List[str] = f.readlines()
with open(SCREAMING_SNAKE_CASE__ , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(SCREAMING_SNAKE_CASE__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Create temp file
_snake_case : str = mkstemp()
_snake_case : Any = False
with fdopen(SCREAMING_SNAKE_CASE__ , """w""" ) as new_file:
with open(SCREAMING_SNAKE_CASE__ ) as old_file:
for line in old_file:
new_file.write(SCREAMING_SNAKE_CASE__ )
if line_to_copy_below in line:
_snake_case : str = True
for line_to_copy in lines_to_copy:
new_file.write(SCREAMING_SNAKE_CASE__ )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Remove original file
remove(SCREAMING_SNAKE_CASE__ )
# Move new file
move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def skip_units(SCREAMING_SNAKE_CASE__ ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ ) as datafile:
_snake_case : Optional[Any] = []
_snake_case : Tuple = False
_snake_case : Tuple = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
_snake_case : Optional[Any] = line.split("""\"""" )[1]
_snake_case : int = skip_units(SCREAMING_SNAKE_CASE__ )
elif "# Below: " in line and "##" not in line:
_snake_case : Union[str, Any] = line.split("""\"""" )[1]
_snake_case : Any = skip_units(SCREAMING_SNAKE_CASE__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : List[str] = []
elif "# Replace with" in line and "##" not in line:
_snake_case : Any = []
elif "##" not in line:
lines_to_copy.append(SCREAMING_SNAKE_CASE__ )
remove(SCREAMING_SNAKE_CASE__ )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(SCREAMING_SNAKE_CASE__ )
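
# Illustration (hypothetical paths/values) of the marker protocol parsed by
# replace_in_files above: the lines collected between "# Replace with" and
# "# End." are spliced into the target file directly below the "# Below:" line.
#
#   # To replace in: "src/transformers/models/auto/configuration_auto.py"
#   # Below: "CONFIG_MAPPING_NAMES = OrderedDict("
#   # Replace with:
#   ("mymodel", "MyModelConfig"),
#   # End.
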
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
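
# A quick interactive sketch of the behavior exercised above (illustrative values):
#
#   >>> from datasets import Dataset
#   >>> Dataset.from_list([{"a": 1}, {"a": 2}])["a"]
#   [1, 2]
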
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
__snake_case :List[str] ='EGZWVONAHDCLFQMSIPJBYUKXTR'
__snake_case :Any ='FOBHMDKEXQNRAULPGSJVTYICZW'
__snake_case :Union[str, Any] ='ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
__snake_case :List[Any] ='RMDJXFUWGISLHVTCQNKYPBEZOA'
__snake_case :Tuple ='SGLCPQWZHKXAREONTFBVIYJUDM'
__snake_case :str ='HVSICLTYKQUBXDWAJZOMFGPREN'
__snake_case :int ='RZWQHFMVDBKICJLNTUXAGYPSOE'
__snake_case :Dict ='LFKIJODBEGAMQPXVUHYSTCZRWN'
__snake_case :Any ='KOAEGVDHXPQZMLFTYWJNBRCIUS'
def lowerCamelCase_ ( lowerCAmelCase__ : RotorPositionT , lowerCAmelCase__ : RotorSelectionT , lowerCAmelCase__ : str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
'''simple docstring'''
if (unique_rotsel := len(set(lowerCAmelCase__ ) )) < 3:
A = F'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(lowerCAmelCase__ )
# Checks if rotor positions are valid
A , A , A = rotpos
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
A = F'''First rotor position is not within range of 1..26 ({rotorposa}'''
raise ValueError(lowerCAmelCase__ )
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
A = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCAmelCase__ )
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
A = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCAmelCase__ )
# Validates string and returns dict
A = _plugboard(lowerCAmelCase__ )
return rotpos, rotsel, pbdict
def lowerCamelCase_ ( lowerCAmelCase__ : str ) -> dict[str, str]:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
A = F'''Plugboard setting isn\'t type string ({type(lowerCAmelCase__ )})'''
raise TypeError(lowerCAmelCase__ )
elif len(lowerCAmelCase__ ) % 2 != 0:
A = F'''Odd number of symbols ({len(lowerCAmelCase__ )})'''
raise Exception(lowerCAmelCase__ )
elif pbstring == "":
return {}
    pbstring = pbstring.replace(' ' , '' )  # str.replace returns a new string; the result must be kept
# Checks if all characters are unique
A = set()
for i in pbstring:
if i not in abc:
A = F'''\'{i}\' not in list of symbols'''
raise Exception(lowerCAmelCase__ )
elif i in tmppbl:
A = F'''Duplicate symbol ({i})'''
raise Exception(lowerCAmelCase__ )
else:
tmppbl.add(lowerCAmelCase__ )
del tmppbl
    # Create the pairing dictionary
    pb = {}
    for j in range(0 , len(pbstring ) - 1 , 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
return pb
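
# Worked example: the plugboard builder above, given "POLAND", pairs letters two
# at a time and returns {'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'}.
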
def lowerCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : RotorPositionT , lowerCAmelCase__ : RotorSelectionT = (rotora, rotora, rotora) , lowerCAmelCase__ : str = "" , ) -> str:
'''simple docstring'''
A = text.upper()
A , A , A = _validator(
lowerCAmelCase__ , lowerCAmelCase__ , plugb.upper() )
A , A , A = rotor_position
A , A , A = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
A = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
A = plugboard[symbol]
# rotor ra --------------------------
A = abc.index(lowerCAmelCase__ ) + rotorposa
A = rotora[index % len(lowerCAmelCase__ )]
# rotor rb --------------------------
A = abc.index(lowerCAmelCase__ ) + rotorposa
A = rotora[index % len(lowerCAmelCase__ )]
# rotor rc --------------------------
A = abc.index(lowerCAmelCase__ ) + rotorposa
A = rotora[index % len(lowerCAmelCase__ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
A = reflector[symbol]
# 2nd rotors
A = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
A = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
A = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
A = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
A = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
A = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
A = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotora, rotora, rotora)
    en = enigma(message, rotor_pos, rotor_sel, pb)
    print('Encrypted message:', en)
    print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_SCREAMING_SNAKE_CASE = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def _snake_case ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in predictions] )
_lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in references] )
else:
_lowerCAmelCase = np.asarray(_lowerCAmelCase )
_lowerCAmelCase = np.asarray(_lowerCAmelCase )
if ignore_case:
_lowerCAmelCase = np.char.lower(_lowerCAmelCase )
_lowerCAmelCase = np.char.lower(_lowerCAmelCase )
if ignore_punctuation:
_lowerCAmelCase = string.punctuation.maketrans("" , "" , string.punctuation )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
if ignore_numbers:
_lowerCAmelCase = string.digits.maketrans("" , "" , string.digits )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
_lowerCAmelCase = predictions == references
return {"exact_match": np.mean(_lowerCAmelCase ) * 100}
import copy
import random
from transformers import CLIPTokenizer
class _lowercase ( _A ):
def __init__( self , *a , **a ):
super().__init__(*a , **a )
snake_case__ : Optional[Any] ={}
def lowercase__ ( self , a , *a , **a ):
snake_case__ : Any =super().add_tokens(a , *a , **a )
if num_added_tokens == 0:
raise ValueError(
F"The tokenizer already contains the token {placeholder_token}. Please pass a different"
""" `placeholder_token` that is not already in the tokenizer.""" )
def lowercase__ ( self , a , *a , a=1 , **a ):
snake_case__ : Optional[int] =[]
if num_vec_per_token == 1:
self.try_adding_tokens(a , *a , **a )
output.append(a )
else:
snake_case__ : Tuple =[]
for i in range(a ):
snake_case__ : str =placeholder_token + F"_{i}"
self.try_adding_tokens(a , *a , **a )
output.append(a )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent." )
snake_case__ : Dict =output
def lowercase__ ( self , a , a=False , a=1.0 ):
if isinstance(a , a ):
snake_case__ : Dict =[]
for i in range(len(a ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=a ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
snake_case__ : List[str] =self.token_map[placeholder_token]
snake_case__ : Any =tokens[: 1 + int(len(a ) * prop_tokens_to_load )]
if vector_shuffle:
snake_case__ : Optional[int] =copy.copy(a )
random.shuffle(a )
snake_case__ : Dict =text.replace(a , """ """.join(a ) )
return text
def __call__( self , a , *a , a=False , a=1.0 , **a ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
a , vector_shuffle=a , prop_tokens_to_load=a ) , *a , **a , )
def lowercase__ ( self , a , *a , a=False , a=1.0 , **a ):
return super().encode(
self.replace_placeholder_tokens_in_text(
a , vector_shuffle=a , prop_tokens_to_load=a ) , *a , **a , )
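
# A hedged usage sketch. The class and most method names are scrambled above; in
# the multi-vector textual-inversion code this wrapper derives from, the class is
# a CLIPTokenizer subclass with an add-placeholder-tokens entry point. The names
# and checkpoint id below are assumptions for illustration only:
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   # "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before encoding:
#   ids = tokenizer("a photo of <cat-toy>")
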
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
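
# Worked example: heaps([1, 2, 3]) produces all 3! = 6 permutations, in Heap's
# order: [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)].
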
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowerCamelCase ( _snake_case=32 ,_snake_case=10 ,_snake_case=100 ,_snake_case=1026 ,_snake_case=True ,_snake_case="data/tokenized_stories_train_wikitext103.jbl" ,_snake_case="igf_context_pairs.jbl" ,):
set_seed(3 )
# generate train_data and objective_set
UpperCAmelCase__ : List[str] = generate_datasets(
__lowerCAmelCase ,__lowerCAmelCase ,number=__lowerCAmelCase ,min_len=1026 ,trim=__lowerCAmelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCAmelCase__ : Any = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# load pretrained model
UpperCAmelCase__ : int = load_gpta('gpt2' ).to(__lowerCAmelCase )
print('computing perplexity on objective set' )
UpperCAmelCase__ : List[Any] = compute_perplexity(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).item()
print('perplexity on objective set:' ,__lowerCAmelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def lowerCamelCase ( _snake_case ,_snake_case=15 ,_snake_case=128 ,_snake_case=100 ,_snake_case="igf_model.pt" ,):
set_seed(42 )
# Load pre-trained model
UpperCAmelCase__ : Dict = GPTaLMHeadModel.from_pretrained('gpt2' )
# Initialize secondary learner to use embedding weights of model
UpperCAmelCase__ : Optional[int] = SecondaryLearner(__lowerCAmelCase )
# Train secondary learner
UpperCAmelCase__ : Optional[Any] = train_secondary_learner(
__lowerCAmelCase ,__lowerCAmelCase ,max_epochs=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,eval_freq=100 ,igf_model_path=__lowerCAmelCase ,)
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ,_snake_case=32 ,_snake_case=1000 ,_snake_case=16 ,_snake_case=1.0 ,_snake_case=recopy_gpta ,_snake_case=None ,_snake_case=10 ,_snake_case="gpt2_finetuned.pt" ,):
UpperCAmelCase__ : Optional[int] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
UpperCAmelCase__ : str = RandomSampler(__lowerCAmelCase )
UpperCAmelCase__ : List[str] = DataLoader(__lowerCAmelCase ,sampler=__lowerCAmelCase )
UpperCAmelCase__ : int = max_steps // (len(__lowerCAmelCase )) + 1
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : List[str] = torch.zeros((1, context_len) ,dtype=torch.long ,device=__lowerCAmelCase )
UpperCAmelCase__ : int = recopy_model(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(__lowerCAmelCase )
secondary_learner.eval()
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : int = []
# Compute the performance of the transformer model at the beginning
UpperCAmelCase__ : List[str] = compute_perplexity(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
test_perps.append(__lowerCAmelCase )
print('Test perplexity, step' ,__lowerCAmelCase ,':' ,__lowerCAmelCase )
for epoch in range(int(__lowerCAmelCase ) ):
for step, example in enumerate(__lowerCAmelCase ):
torch.cuda.empty_cache()
UpperCAmelCase__ : List[Any] = random.randint(0 ,example.size(2 ) - context_len - 1 )
UpperCAmelCase__ : Union[str, Any] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCAmelCase__ : Union[str, Any] = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
UpperCAmelCase__ : Any = True
if secondary_learner is not None:
UpperCAmelCase__ : str = secondary_learner.forward(
torch.tensor(__lowerCAmelCase ,dtype=torch.long ,device=__lowerCAmelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__lowerCAmelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
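                    # Note: this simplified version implements the decay as a single
                    # step rather than a running schedule: the threshold starts at
                    # +1 (one sigma above the mean of the standardized IG predictions)
                    # and drops to -1 after 10 batches.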
if global_step == 10:
UpperCAmelCase__ : str = -1
if predicted_q < threshold:
UpperCAmelCase__ : Any = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
UpperCAmelCase__ : Optional[int] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
UpperCAmelCase__ : List[Any] = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() ,3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCAmelCase__ : Any = compute_perplexity(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
test_perps.append(__lowerCAmelCase )
print('Test perplexity, step' ,__lowerCAmelCase ,':' ,__lowerCAmelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() ,__lowerCAmelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def lowerCamelCase ( ):
UpperCAmelCase__ : Any = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' ,default=__lowerCAmelCase ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help='The input data dir. Should contain data files for WikiText.' ,)
parser.add_argument(
'--model_name_or_path' ,default=__lowerCAmelCase ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help='Path to pretrained model or model identifier from huggingface.co/models' ,)
parser.add_argument(
'--data_file' ,type=__lowerCAmelCase ,default=__lowerCAmelCase ,help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) ,)
parser.add_argument(
'--igf_data_file' ,type=__lowerCAmelCase ,default=__lowerCAmelCase ,help='A jbl file containing the context and information gain pairs to train secondary learner.' ,)
parser.add_argument(
'--output_dir' ,default=__lowerCAmelCase ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help='The output directory where the final fine-tuned model is stored.' ,)
parser.add_argument(
'--tokenizer_name' ,default=__lowerCAmelCase ,type=__lowerCAmelCase ,help='Pretrained tokenizer name or path if not the same as model_name' ,)
parser.add_argument('--seed' ,type=__lowerCAmelCase ,default=__lowerCAmelCase ,help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' ,default=32 ,type=__lowerCAmelCase ,help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) ,)
parser.add_argument(
'--size_objective_set' ,default=100 ,type=__lowerCAmelCase ,help='number of articles that are long enough to be used as our objective set' ,)
parser.add_argument(
'--eval_freq' ,default=100 ,type=__lowerCAmelCase ,help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' ,default=1000 ,type=__lowerCAmelCase ,help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' ,default=128 ,type=__lowerCAmelCase ,help='batch size of training data for secondary learner' ,)
parser.add_argument(
'--batch_size' ,default=16 ,type=__lowerCAmelCase ,help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' ,default=10 ,type=__lowerCAmelCase ,help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) ,)
parser.add_argument(
'--number' ,default=100 ,type=__lowerCAmelCase ,help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' ,default=1026 ,type=__lowerCAmelCase ,help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' ,default=15 ,type=__lowerCAmelCase ,help='number of epochs to train secondary learner' )
parser.add_argument('--trim' ,default=__lowerCAmelCase ,type=__lowerCAmelCase ,help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' ,default=1.0 ,type=__lowerCAmelCase ,help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) ,)
parser.add_argument('--finetuned_model_name' ,default='gpt2_finetuned.pt' ,type=__lowerCAmelCase ,help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' ,default=__lowerCAmelCase ,type=__lowerCAmelCase ,help='Reset the model to the original pretrained GPT-2 weights after each iteration' ,)
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 ,max_steps=10 ,size_objective_set=100 ,min_len=1026 ,trim=__lowerCAmelCase ,data_file='data/tokenized_stories_train_wikitext103.jbl' ,igf_data_file='igf_context_pairs.jbl' ,)
# Load train data for secondary learner
UpperCAmelCase__ : int = joblib.load('data/IGF_values.jbl' )
# Train secondary learner
UpperCAmelCase__ : Dict = training_secondary_learner(
__lowerCAmelCase ,secondary_learner_max_epochs=15 ,secondary_learner_batch_size=128 ,eval_freq=100 ,igf_model_path='igf_model.pt' ,)
# load pretrained gpt2 model
UpperCAmelCase__ : Union[str, Any] = GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
UpperCAmelCase__ : List[Any] = generate_datasets(
context_len=32 ,file='data/tokenized_stories_train_wikitext103.jbl' ,number=100 ,min_len=1026 ,trim=__lowerCAmelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,context_len=32 ,max_steps=1000 ,batch_size=16 ,threshold=1.0 ,recopy_model=__lowerCAmelCase ,secondary_learner=__lowerCAmelCase ,eval_interval=10 ,finetuned_model_name='gpt2_finetuned.pt' ,)
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main() -> None:
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")
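    # 1 << 16 == 65536: when every token id fits in 16 bits (true for e.g. BERT's
    # ~30k-entry vocab), storing the arrays as uint16 halves the pickle size
    # compared to int32.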
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split ``x`` into sentences, one per line (used for ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char; re.sub returns a new string
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
@dataclass
class a_ :
_snake_case = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_snake_case = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class a_ :
_snake_case = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_snake_case = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
__snake_case : List[str] = import_module('tasks' )
try:
__snake_case : Any = getattr(A , model_args.task_type )
__snake_case : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , A )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels )
__snake_case : Dict[int, str] = dict(enumerate(A ) )
__snake_case : Optional[Any] = len(A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , )
__snake_case : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
# Get datasets
__snake_case : List[Any] = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__snake_case : int = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]:
__snake_case : str = np.argmax(A , axis=2 )
__snake_case ,__snake_case : int = preds.shape
__snake_case : Dict = [[] for _ in range(A )]
__snake_case : Union[str, Any] = [[] for _ in range(A )]
for i in range(A ):
for j in range(A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
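
    # Sketch: predictions has shape (batch, seq_len, num_labels); argmax over the
    # last axis gives predicted label ids, and positions whose gold id equals
    # nn.CrossEntropyLoss().ignore_index (-100) are padding/subword positions that
    # are skipped before mapping ids back to label strings.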
def compute_metrics(A : EvalPrediction ) -> Dict:
__snake_case ,__snake_case : Any = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(A , A ),
"precision": precision_score(A , A ),
"recall": recall_score(A , A ),
"f1": fa_score(A , A ),
}
# Data collator
__snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__snake_case : Optional[Any] = Trainer(
model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__snake_case : List[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : List[str] = trainer.evaluate()
__snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , A , A )
writer.write('%s = %s\n' % (key, value) )
results.update(A )
# Predict
if training_args.do_predict:
__snake_case : str = TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__snake_case ,__snake_case ,__snake_case : str = trainer.predict(A )
__snake_case ,__snake_case : List[str] = align_predictions(A , A )
__snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(A , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , A , A )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(A , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(A , A , A )
return results
def _SCREAMING_SNAKE_CASE ( A : int ) -> Any:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ): # noqa: E741
"""simple docstring"""
while r - l > 1:
snake_case_ : List[str] = (l + r) // 2
if v[m] >= key:
snake_case_ : List[Any] = m
else:
snake_case_ : Any = m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[int] ):
"""simple docstring"""
if len(snake_case__ ) == 0:
return 0
snake_case_ : Optional[int] = [0] * len(snake_case__ )
snake_case_ : Any = 1
snake_case_ : Optional[int] = v[0]
for i in range(1 , len(snake_case__ ) ):
if v[i] < tail[0]:
snake_case_ : Dict = v[i]
elif v[i] > tail[length - 1]:
snake_case_ : List[str] = v[i]
length += 1
else:
snake_case_ : str = v[i]
return length
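
# Worked example: longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
# returns 6, realized e.g. by the subsequence [2, 3, 7, 8, 10, 13].
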
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__a = logging.getLogger(__name__)
__a = tf.data.AUTOTUNE
def a ( ):
'''simple docstring'''
lowercase_ = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=snake_case__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=snake_case__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=snake_case__ , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=snake_case__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=snake_case__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=snake_case__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=snake_case__ , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=snake_case__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=snake_case__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case__ , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=snake_case__ , default=1e-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=snake_case__ , default=1e-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=snake_case__ , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=snake_case__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=snake_case__ , required=snake_case__ , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=snake_case__ , help='''Model ID to upload to on the Hugging Face Hub.''' )
lowercase_ = parser.parse_args()
return args
def a ( snake_case__: int ):
'''simple docstring'''
try:
if args.tpu_name:
lowercase_ = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase_ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(snake_case__ )
tf.tpu.experimental.initialize_tpu_system(snake_case__ )
return tpu
def a ( snake_case__: Optional[int] ):
'''simple docstring'''
lowercase_ = 0
for file in file_list:
lowercase_ = file.split('''/''' )[-1]
lowercase_ = re.search(r'''-\d+-(\d+)\.tfrecord''' , snake_case__ ).group(1 )
lowercase_ = int(snake_case__ )
num_samples += sample_count
return num_samples
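
# e.g. a hypothetical shard named "wiki-00042-1024.tfrecord" matches
# r"-\d+-(\d+)\.tfrecord" and contributes 1024 samples to the count.
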
def a ( snake_case__: Dict , snake_case__: Dict , snake_case__: Union[str, Any] , snake_case__: Optional[int] , snake_case__: Optional[int] , snake_case__: Dict=None ):
'''simple docstring'''
lowercase_ = count_samples(snake_case__ )
lowercase_ = tf.data.Dataset.from_tensor_slices(snake_case__ )
if shuffle:
lowercase_ = dataset.shuffle(len(snake_case__ ) )
lowercase_ = tf.data.TFRecordDataset(snake_case__ , num_parallel_reads=snake_case__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase_ = dataset.apply(tf.data.experimental.assert_cardinality(snake_case__ ) )
lowercase_ = dataset.map(snake_case__ , num_parallel_calls=snake_case__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase_ = dataset.shuffle(args.shuffle_buffer_size )
lowercase_ = dataset.batch(snake_case__ , drop_remainder=snake_case__ )
lowercase_ = dataset.map(snake_case__ , num_parallel_calls=snake_case__ )
lowercase_ = dataset.prefetch(snake_case__ )
return dataset
def a ( snake_case__: Tuple ):
'''simple docstring'''
if not args.no_tpu:
lowercase_ = initialize_tpu(snake_case__ )
lowercase_ = tf.distribute.TPUStrategy(snake_case__ )
else:
lowercase_ = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
lowercase_ = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase_ = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase_ = tokenizer.vocab_size
lowercase_ = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(F'''No .tfrecord files found in {args.train_dataset}.''' )
lowercase_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(F'''No .tfrecord files found in {args.eval_dataset}.''' )
lowercase_ = count_samples(snake_case__ )
lowercase_ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase_ = steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase_ = TFAutoModelForMaskedLM.from_config(snake_case__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase_ , lowercase_ = create_optimizer(
num_train_steps=snake_case__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=snake_case__ , metrics=['''accuracy'''] )
def decode_fn(snake_case__: Dict ):
lowercase_ = {
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(snake_case__ , snake_case__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase_ = DataCollatorForLanguageModeling(
tokenizer=snake_case__ , mlm_probability=args.mlm_probability , mlm=snake_case__ , return_tensors='''tf''' )
def mask_with_collator(snake_case__: str ):
# TF really needs an isin() function
lowercase_ = (
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
lowercase_ , lowercase_ = data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(snake_case__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=snake_case__ , )
return batch
lowercase_ = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase_ = prepare_dataset(
snake_case__ , decode_fn=snake_case__ , mask_fn=snake_case__ , batch_size=snake_case__ , shuffle=snake_case__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase_ = prepare_dataset(
snake_case__ , decode_fn=snake_case__ , mask_fn=snake_case__ , batch_size=snake_case__ , shuffle=snake_case__ , )
lowercase_ = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=snake_case__ ) )
model.fit(
snake_case__ , validation_data=snake_case__ , epochs=args.num_epochs , callbacks=snake_case__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__a = parse_args()
main(args)
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = '▁'
lowercase_ = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
lowercase_ = {
'sentencepiece_model_file': 'sentencepiece.bpe.model',
'vocab_file': 'vocab.txt',
}
lowercase_ = {
'vocab_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
},
'sentencepiece_model_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
},
}
lowercase_ = {
'ernie-m-base': 514,
'ernie-m-large': 514,
}
lowercase_ = {
'ernie-m-base': {'do_lower_case': False},
'ernie-m-large': {'do_lower_case': False},
}
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
A_ : List[str] = ["input_ids"]
A_ : Union[str, Any] = VOCAB_FILES_NAMES
A_ : int = PRETRAINED_INIT_CONFIGURATION
A_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A_ : List[Any] = RESOURCE_FILES_NAMES
def __init__( self : Optional[int], _lowerCamelCase : str, _lowerCamelCase : Any=None, _lowerCamelCase : Dict=False, _lowerCamelCase : Optional[int]="utf8", _lowerCamelCase : List[Any]="[UNK]", _lowerCamelCase : Optional[int]="[SEP]", _lowerCamelCase : str="[PAD]", _lowerCamelCase : List[str]="[CLS]", _lowerCamelCase : str="[MASK]", _lowerCamelCase : Optional[Dict[str, Any]] = None, **_lowerCamelCase : Any, ):
'''simple docstring'''
__A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, pad_token=_lowerCamelCase, cls_token=_lowerCamelCase, mask_token=_lowerCamelCase, vocab_file=_lowerCamelCase, encoding=_lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **_lowerCamelCase, )
__A = do_lower_case
__A = sentencepiece_model_ckpt
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__A = self.load_vocab(filepath=_lowerCamelCase )
else:
__A = {self.sp_model.id_to_piece(_lowerCamelCase ): id for id in range(self.sp_model.get_piece_size() )}
__A = {v: k for k, v in self.vocab.items()}
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if text is None:
return None
__A = self.tokenize(_lowerCamelCase )
__A , __A = '''''', []
for i, ch in enumerate(_lowerCamelCase ):
if ch in self.SP_CHAR_MAPPING:
__A = self.SP_CHAR_MAPPING.get(_lowerCamelCase )
else:
__A = unicodedata.normalize('''NFKC''', _lowerCamelCase )
if self.is_whitespace(_lowerCamelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowerCamelCase ) )
__A , __A , __A = normalized_text, [], 0
if self.do_lower_case:
__A = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__A = token[1:]
__A = text[offset:].index(_lowerCamelCase ) + offset
__A = start + len(_lowerCamelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__A = end
return token_mapping
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
return len(self.vocab )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
return dict(self.vocab, **self.added_tokens_encoder )
def __getstate__( self : Optional[int] ):
'''simple docstring'''
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : Union[str, Any], _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__A = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_lowerCamelCase, _lowerCamelCase ) for c in text) )
    def _SCREAMING_SNAKE_CASE ( self : List[str], text : Optional[int], enable_sampling : int=False, nbest_size : Dict=64, alpha : Tuple=0.1 ):
        '''simple docstring'''
        if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('''alpha''' ) is not None:
            alpha = self.sp_model_kwargs.get('''alpha''' )
        if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
            nbest_size = self.sp_model_kwargs.get('''nbest_size''' )
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def _SCREAMING_SNAKE_CASE ( self : Tuple, tokens : Dict ):
        '''simple docstring'''
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE, ''' ''' ).strip()
        return out_string
    def _SCREAMING_SNAKE_CASE ( self : List[Any], ids : Optional[int] ):
        '''simple docstring'''
        tokens = self.convert_ids_to_tokens(ids )
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE, ''' ''' ).strip()
        return out_string
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ):
'''simple docstring'''
return self.vocab.get(_lowerCamelCase, self.vocab.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ):
'''simple docstring'''
return self.reverse_vocab.get(_lowerCamelCase, self.unk_token )
    def _SCREAMING_SNAKE_CASE ( self : List[str], token_ids_0 : Dict, token_ids_1 : List[Any]=None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def _SCREAMING_SNAKE_CASE ( self : List[Any], offset_mapping_0 : List[Any], offset_mapping_1 : Tuple=None ):
        '''simple docstring'''
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any], token_ids_0 : Optional[Any], token_ids_1 : Any=None, already_has_special_tokens : List[str]=False ):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def _SCREAMING_SNAKE_CASE ( self : Tuple, token_ids_0 : List[int], token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0 ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0 ) + 1) + [1] * (len(token_ids_1 ) + 3)
    def _SCREAMING_SNAKE_CASE ( self : List[Any], char : str ):
        '''simple docstring'''
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def _SCREAMING_SNAKE_CASE ( self : List[str], char : Union[str, Any] ):
        '''simple docstring'''
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def _SCREAMING_SNAKE_CASE ( self : List[Any], char : str ):
        '''simple docstring'''
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def _SCREAMING_SNAKE_CASE ( self : Optional[int], char : str ):
        '''simple docstring'''
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def _SCREAMING_SNAKE_CASE ( self : List[str], filepath : Tuple ):
        '''simple docstring'''
        token_to_idx = {}
        with io.open(filepath, '''r''', encoding='''utf-8''' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('''\n''' )
                token_to_idx[token] = index
        return token_to_idx
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], save_directory : str, filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(vocab_file, '''w''', encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + '''\n''' )
                index += 1
        sentencepiece_model_file = os.path.join(save_directory, '''sentencepiece.bpe.model''' )
        with open(sentencepiece_model_file, '''wb''' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
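    # ------------------------------------------------------------------
    # Hedged usage sketch (not part of the original file): the vocab file
    # written above is plain text, one token per line, where the line number
    # is the token id. A minimal standalone reader mirroring `load_vocab`:
    #
    #   import io
    #   def read_vocab(path):
    #       token_to_idx = {}
    #       with io.open(path, "r", encoding="utf-8") as f:
    #           for index, line in enumerate(f):
    #               token_to_idx[line.rstrip("\n")] = index
    #       return token_to_idx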
| 709
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings ( idx ):
    """simple docstring"""
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention ( idx , cnt ):
    """simple docstring"""
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token ( idx ):
    """simple docstring"""
    token = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def final ( ):
    """simple docstring"""
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
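# Hedged illustration (not part of the original script): each helper above
# returns (huggingface_key, original_key) pairs, so a converted state dict is
# built by copying the original tensors under the new names, e.g.:
#
#   renames = embeddings(0) + cls_token(2) + final()
#   new_state_dict = {new_k: old_state_dict[old_k] for new_k, old_k in renames}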
def convert_cvt_checkpoint ( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    """simple docstring"""
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1_0_0_0
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 1_0]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 2_0]
        config.num_heads = [3, 1_2, 1_6]
        config.embed_dim = [1_9_2, 7_6_8, 1_0_2_4]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu''' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
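# Hedged usage sketch (script and checkpoint paths are assumptions): a CvT-13
# conversion would be invoked roughly as:
#
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-13 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-13-384x384-IN-1k.pth \
#       --pytorch_dump_folder_path ./cvt-13-converted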
| 215
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a : Dict = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Dict = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 534
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput ( BaseOutput ):
    frames : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
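# Hedged usage sketch (checkpoint name assumed to exist on the Hub): with torch
# and transformers installed, the re-exports above make the pipelines importable
# from the package root, e.g.:
#
#   from diffusers import TextToVideoSDPipeline
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")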
| 228
| 0
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration ( func ):
    """simple docstring"""
    def wrapper(*args , **kwargs ):
        starttime = timeit.default_timer()
        _ = func(*args , **kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples ( features , num_examples=1_00 , seq_shapes=None ):
    """simple docstring"""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = """The small grey turtle was surprisingly fast when challenged."""
                else:
                    data = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset ( dataset_path , features , num_examples=1_00 , seq_shapes=None ):
    """simple docstring"""
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) )
    return dataset
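# Hedged usage sketch (feature spec and path are assumptions): writing and
# reloading a tiny dummy dataset with the helpers above:
#
#   features = datasets.Features({"text": datasets.Value("string"),
#                                 "label": datasets.Value("int32")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
#   print(len(dataset))  # 100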
| 714
|
import functools
from typing import Any
def word_break ( string , words ):
    """simple docstring"""
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("the string should be not empty string" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("the words should be a list of non-empty strings" )
    # Build trie
    trie : dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
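    # Hedged example (not in the original): the trie-based DP above accepts
    # strings that factor into dictionary words, e.g.
    #   word_break("applepenapple", ["apple", "pen"])  # True: "apple"+"pen"+"apple"
    #   word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])  # False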
| 587
| 0
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()
def floats_list ( shape , scale=1.0 , rng=None , name=None ) -> list:
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=1_60_00 , return_attention_mask=True , do_normalize=True , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def __SCREAMING_SNAKE_CASE ( self , equal_length=False , numpify=False ) -> List[str]:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ : str = SpeechaTextFeatureExtractor if is_speech_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = SpeechaTextFeatureExtractionTester(self )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
self.assertTrue(np.all(np.mean(lowerCamelCase_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ , axis=0 ) - 1 ) < 1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowerCAmelCase__ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
lowerCAmelCase__ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test batched
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCAmelCase__ = np.asarray(lowerCamelCase_ )
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCAmelCase__ = [None, 16, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = [np.sum(lowerCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCAmelCase__ = [None, 16, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , max_length=lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = [np.sum(lowerCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding='''max_length''' , max_length=4 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding='''longest''' , max_length=4 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding='''longest''' , max_length=16 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
import torch
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = np.random.rand(1_00 , 32 ).astype(np.floataa )
lowerCAmelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase__ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCAmelCase__ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ) -> Dict:
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# fmt: off
lowerCAmelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
lowerCAmelCase__ = self._load_datasamples(1 )
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''pt''' ).input_features
self.assertEquals(input_features.shape , (1, 5_84, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowerCamelCase_ , atol=1e-4 ) )
| 90
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
    tokenizer = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 248
| 0
|
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid ( _outputs ) ->Union[str, Any]:
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax ( _outputs ) ->int:
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = """sigmoid"""
__snake_case = """softmax"""
__snake_case = """none"""
@add_end_docstrings(
a_ , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = False
__snake_case = ClassificationFunction.NONE
def __init__( self , **_lowercase ) -> Optional[Any]:
super().__init__(**_lowercase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def a__ ( self , _lowercase=None , _lowercase=None , _lowercase="" , **_lowercase ) -> List[Any]:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
_lowerCamelCase : List[str] = tokenizer_kwargs
_lowerCamelCase : Union[str, Any] = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
_lowerCamelCase : Any = self.model.config.return_all_scores
if isinstance(_lowercase , _lowercase ) or top_k is None:
_lowerCamelCase : Union[str, Any] = top_k
_lowerCamelCase : Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , _lowercase , )
if return_all_scores:
_lowerCamelCase : Optional[Any] = None
else:
_lowerCamelCase : List[Any] = 1
if isinstance(_lowercase , _lowercase ):
_lowerCamelCase : Optional[Any] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowerCamelCase : int = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *_lowercase , **_lowercase ) -> int:
_lowerCamelCase : int = super().__call__(*_lowercase , **_lowercase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowerCamelCase : Optional[int] = '''top_k''' not in kwargs
if isinstance(args[0] , _lowercase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def a__ ( self , _lowercase , **_lowercase ) -> Dict[str, GenericTensor]:
_lowerCamelCase : str = self.framework
if isinstance(_lowercase , _lowercase ):
return self.tokenizer(**_lowercase , return_tensors=_lowercase , **_lowercase )
elif isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1 and isinstance(inputs[0] , _lowercase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_lowercase , **_lowercase )
elif isinstance(_lowercase , _lowercase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
def a__ ( self , _lowercase ) -> Union[str, Any]:
return self.model(**_lowercase )
    def a__ ( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ) -> Optional[Any]:
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['''logits'''][0]
        scores = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {'''label''': self.model.config.id2label[i], '''score''': score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
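    # Hedged usage sketch (checkpoint name assumed to exist on the Hub): this
    # pipeline is normally reached through the factory, e.g.
    #
    #   from transformers import pipeline
    #   clf = pipeline("text-classification",
    #                  model="distilbert-base-uncased-finetuned-sst-2-english")
    #   clf("I love this movie")              # [{"label": "POSITIVE", "score": ...}]
    #   clf("I love this movie", top_k=None)  # scores for every label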
| 558
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = ["""image_processor""", """tokenizer"""]
__snake_case = """ViltImageProcessor"""
__snake_case = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Tuple:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
def __call__( self , _lowercase , _lowercase = None , _lowercase = True , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = True , _lowercase = None , **_lowercase , ) -> BatchEncoding:
_lowerCamelCase : Dict = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_token_type_ids=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
# add pixel_values + pixel_mask
_lowerCamelCase : List[Any] = self.image_processor(_lowercase , return_tensors=_lowercase )
encoding.update(_lowercase )
return encoding
def a__ ( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a__ ( self , *_lowercase , **_lowercase ) -> Optional[int]:
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def a__ ( self ) -> Optional[Any]:
_lowerCamelCase : Any = self.tokenizer.model_input_names
_lowerCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def a__ ( self ) -> Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _lowercase , )
return self.image_processor_class
@property
def a__ ( self ) -> Union[str, Any]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _lowercase , )
return self.image_processor
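    # Hedged usage sketch (checkpoint name assumed): pairing an image with a
    # question for ViLT visual question answering:
    #
    #   from transformers import ViltProcessor
    #   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    #   enc = processor(images=image, text="How many cats are there?",
    #                   return_tensors="pt")  # input_ids + pixel_values (+ pixel_mask)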
| 558
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class snake_case ( _UpperCAmelCase ):
lowerCamelCase__ = 'switch_transformers'
lowerCamelCase__ = ['past_key_values']
lowerCamelCase__ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self :str , vocab_size=3_2_1_2_8 , d_model=7_6_8 , d_kv=6_4 , d_ff=2_0_4_8 , expert_capacity=6_4 , num_layers=1_2 , num_sparse_encoder_layers=3 , num_decoder_layers=1_2 , num_sparse_decoder_layers=3 , num_heads=1_2 , num_experts=8 , router_bias=False , router_jitter_noise=0.0_1 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=3_2 , relative_attention_max_distance=1_2_8 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , router_z_loss_coef=0.0_0_1 , router_aux_loss_coef=0.0_0_1 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs :int , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
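    # Hedged usage sketch (class name as defined above): overriding defaults for
    # a smaller variant; every other field keeps its default.
    #
    #   config = snake_case(num_layers=6, num_sparse_encoder_layers=3, num_experts=8)
    #   assert config.encoder_sparse_step == 2  # a sparse MoE layer every 2 layers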
| 674
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a : Dict = logging.get_logger(__name__)
# TODO: upload to AWS
a : Tuple = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class a_ ( _UpperCAmelCase ):
a : int = 'retribert'
    def __init__( self : Optional[Any] , vocab_size : Optional[Any]=3_05_22 , hidden_size : int=7_68 , num_hidden_layers : Any=8 , num_attention_heads : Dict=12 , intermediate_size : List[Any]=30_72 , hidden_act : List[Any]="gelu" , hidden_dropout_prob : Optional[int]=0.1 , attention_probs_dropout_prob : int=0.1 , max_position_embeddings : Union[str, Any]=5_12 , type_vocab_size : Dict=2 , initializer_range : Tuple=0.0_2 , layer_norm_eps : List[str]=1e-12 , share_encoders : int=True , projection_dim : str=1_28 , pad_token_id : Union[str, Any]=0 , **kwargs : Any , ) ->List[Any]:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 555
| 0
|
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( lowercase__ ):
__a : List[Any] = 42
__a : Any = None
def betas_for_alpha_bar (num_diffusion_timesteps : List[Any] , max_beta : str=0.999 , alpha_transform_type : List[str]="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t : Optional[Any] ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t : Optional[Any] ):
            return math.exp(t * -1_2.0 )
    else:
        raise ValueError(F"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class _a ( lowercase__ , lowercase__ ):
@register_to_config
    def __init__( self : int , num_train_timesteps : int = 1_000 , variance_type : str = "fixed_small_log" , clip_sample : bool = True , clip_sample_range : Optional[float] = 1.0 , prediction_type : str = "epsilon" , beta_schedule : str = "squaredcos_cap_v2" , ):
        '''simple docstring'''
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
def A ( self : str , lowercase : torch.FloatTensor , lowercase : Optional[int] = None ):
'''simple docstring'''
return sample
def A ( self : Optional[Any] , lowercase : int , lowercase : Union[str, torch.device] = None ):
'''simple docstring'''
UpperCAmelCase = num_inference_steps
UpperCAmelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCAmelCase = (np.arange(0 , lowercase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCAmelCase = torch.from_numpy(lowercase ).to(lowercase )
def A ( self : List[str] , lowercase : str , lowercase : Optional[Any]=None , lowercase : Any=None , lowercase : Optional[Any]=None ):
'''simple docstring'''
if prev_timestep is None:
UpperCAmelCase = t - 1
UpperCAmelCase = self.alphas_cumprod[t]
UpperCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase = 1 - alpha_prod_t
UpperCAmelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase = self.betas[t]
else:
UpperCAmelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCAmelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCAmelCase = torch.log(torch.clamp(lowercase , min=1E-20 ) )
UpperCAmelCase = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCAmelCase = variance.log()
UpperCAmelCase = beta.log()
UpperCAmelCase = (predicted_variance + 1) / 2
UpperCAmelCase = frac * max_log + (1 - frac) * min_log
return variance
def A ( self : Optional[int] , lowercase : torch.FloatTensor , lowercase : int , lowercase : torch.FloatTensor , lowercase : Optional[int] = None , lowercase : Union[str, Any]=None , lowercase : bool = True , ):
'''simple docstring'''
UpperCAmelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCAmelCase = torch.split(lowercase , sample.shape[1] , dim=1 )
else:
UpperCAmelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCAmelCase = t - 1
UpperCAmelCase = self.alphas_cumprod[t]
UpperCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase = 1 - alpha_prod_t
UpperCAmelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase = self.betas[t]
UpperCAmelCase = self.alphas[t]
else:
UpperCAmelCase = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCAmelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase = torch.clamp(
lowercase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCAmelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCAmelCase = 0
if t > 0:
UpperCAmelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=lowercase , device=model_output.device )
UpperCAmelCase = self._get_variance(
lowercase , predicted_variance=lowercase , prev_timestep=lowercase , )
if self.variance_type == "fixed_small_log":
UpperCAmelCase = variance
elif self.variance_type == "learned_range":
UpperCAmelCase = (0.5 * variance).exp()
else:
raise ValueError(
f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
''' for the UnCLIPScheduler.''' )
UpperCAmelCase = variance * variance_noise
UpperCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=lowercase , pred_original_sample=lowercase )
def A ( self : List[Any] , lowercase : torch.FloatTensor , lowercase : torch.FloatTensor , lowercase : torch.IntTensor , ):
'''simple docstring'''
UpperCAmelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCAmelCase = timesteps.to(original_samples.device )
UpperCAmelCase = alphas_cumprod[timesteps] ** 0.5
UpperCAmelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase = sqrt_alpha_prod.unsqueeze(-1 )
UpperCAmelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCAmelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCAmelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
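# Standalone sketch (not part of the scheduler above): `add_noise` is the
# closed-form DDPM forward process q(x_t | x_0),
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.
# The linear beta schedule and the toy tensor shapes below are assumptions
# made only so the example is self-contained and runnable.
import torch

betas = torch.linspace(1e-4, 0.02, 1000)            # assumed schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # alpha_bar_t

x0 = torch.randn(2, 3, 8, 8)                        # toy clean samples
noise = torch.randn_like(x0)
timesteps = torch.tensor([10, 500])                 # one timestep per sample

sqrt_ab = alphas_cumprod[timesteps].sqrt().view(-1, 1, 1, 1)
sqrt_1m_ab = (1.0 - alphas_cumprod[timesteps]).sqrt().view(-1, 1, 1, 1)
xt = sqrt_ab * x0 + sqrt_1m_ab * noise              # same math as add_noise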
# ---------------------------------------------------------------------------
'''Swap the payloads of two nodes in a singly linked list, found by value.'''
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=' ')
            temp = temp.next
        print()

    # adding a node at the head of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes, found by value
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # only the payloads are exchanged; the links stay untouched
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
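# Expected console output for the demo above: pushing 5..1 at the head
# yields the list 1 2 3 4 5, and swapping the nodes holding 1 and 4 gives:
#
#     1 2 3 4 5
#     After swapping
#     4 2 3 1 5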
# ---------------------------------------------------------------------------
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : int = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class __lowercase ( __snake_case ):
lowerCamelCase : Tuple = "efficientformer"
def __init__(self , A = [3, 2, 6, 4] , A = [4_8, 9_6, 2_2_4, 4_4_8] , A = [True, True, True, True] , A = 4_4_8 , A = 3_2 , A = 4 , A = 7 , A = 5 , A = 8 , A = 4 , A = 0.0 , A = 1_6 , A = 3 , A = 3 , A = 3 , A = 2 , A = 1 , A = 0.0 , A = 1 , A = True , A = True , A = 1E-5 , A = "gelu" , A = 0.02 , A = 1E-12 , A = 2_2_4 , A = 1E-05 , **A , ):
super().__init__(**lowerCAmelCase_ )
lowerCamelCase_ : Any = hidden_act
lowerCamelCase_ : List[str] = hidden_dropout_prob
lowerCamelCase_ : Tuple = hidden_sizes
lowerCamelCase_ : int = num_hidden_layers
lowerCamelCase_ : Optional[Any] = num_attention_heads
lowerCamelCase_ : Union[str, Any] = initializer_range
lowerCamelCase_ : List[str] = layer_norm_eps
lowerCamelCase_ : Optional[Any] = patch_size
lowerCamelCase_ : Any = num_channels
lowerCamelCase_ : Optional[int] = depths
lowerCamelCase_ : List[str] = mlp_expansion_ratio
lowerCamelCase_ : Optional[Any] = downsamples
lowerCamelCase_ : Optional[int] = dim
lowerCamelCase_ : Tuple = key_dim
lowerCamelCase_ : Optional[Any] = attention_ratio
lowerCamelCase_ : Any = resolution
lowerCamelCase_ : List[Any] = pool_size
lowerCamelCase_ : Optional[int] = downsample_patch_size
lowerCamelCase_ : Union[str, Any] = downsample_stride
lowerCamelCase_ : Tuple = downsample_pad
lowerCamelCase_ : Optional[int] = drop_path_rate
lowerCamelCase_ : Any = num_metaad_blocks
lowerCamelCase_ : str = distillation
lowerCamelCase_ : Any = use_layer_scale
lowerCamelCase_ : str = layer_scale_init_value
lowerCamelCase_ : Optional[Any] = image_size
lowerCamelCase_ : Union[str, Any] = batch_norm_eps
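# Minimal usage sketch, assuming this class corresponds to the upstream
# `EfficientFormerConfig` in `transformers` (the defaults above match the
# EfficientFormer-L1 variant):
#
#     from transformers import EfficientFormerConfig
#
#     config = EfficientFormerConfig()                     # L1 defaults
#     print(config.hidden_sizes)                           # [48, 96, 224, 448]
#     small = EfficientFormerConfig(depths=[2, 2, 4, 3])   # hypothetical override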
# ---------------------------------------------------------------------------
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case ( __snake_case ):
"""simple docstring"""
__lowerCAmelCase = ["""image_processor""", """tokenizer"""]
__lowerCAmelCase = """LayoutLMv2ImageProcessor"""
__lowerCAmelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase_ , )
__lowercase = kwargs.pop("feature_extractor" )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
__lowercase = self.image_processor(images=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features["words"]
__lowercase = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
# add pixel values
__lowercase = features.pop("pixel_values" )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(lowerCAmelCase_ , encoded_inputs["overflow_to_sample_mapping"] )
__lowercase = images
return encoded_inputs
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f''' {len(lowerCAmelCase_ )} and {len(lowerCAmelCase_ )}''' )
return images_with_overflow
def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def snake_case__ ( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def snake_case__ ( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCAmelCase_ , )
return self.image_processor_class
@property
def snake_case__ ( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCAmelCase_ , )
return self.image_processor
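# Usage sketch, assuming this is the LayoutXLM processor from `transformers`
# and that a document image is available as a PIL image; with `apply_ocr`
# left enabled, the image processor extracts words and boxes and the
# tokenizer encodes them:
#
#     from PIL import Image
#     from transformers import LayoutXLMProcessor
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     image = Image.open("document.png").convert("RGB")    # placeholder path
#     encoding = processor(image, return_tensors="pt")
#     print(list(encoding.keys()))   # input_ids, bbox, attention_mask, image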
# ---------------------------------------------------------------------------
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
SCREAMING_SNAKE_CASE : List[str] = "pt"
elif is_tf_available():
SCREAMING_SNAKE_CASE : str = "tf"
else:
SCREAMING_SNAKE_CASE : List[Any] = "jax"
class snake_case ( lowercase_, unittest.TestCase ):
"""simple docstring"""
_a = ByTaTokenizer
_a = False
def a__ ( self ) -> List[str]:
super().setUp()
SCREAMING_SNAKE_CASE_ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Union[str, Any]:
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def a__ ( self, **_lowercase ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_lowercase )
def a__ ( self, _lowercase, _lowercase=False, _lowercase=20, _lowercase=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
SCREAMING_SNAKE_CASE_ = []
for i in range(len(_lowercase ) ):
try:
SCREAMING_SNAKE_CASE_ = tokenizer.decode([i], clean_up_tokenization_spaces=_lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda _lowercase : re.match(R'^[ a-zA-Z]+$', t[1] ), _lowercase ) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda _lowercase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_lowercase ), _lowercase ) )
if max_length is not None and len(_lowercase ) > max_length:
SCREAMING_SNAKE_CASE_ = toks[:max_length]
if min_length is not None and len(_lowercase ) < min_length and len(_lowercase ) > 0:
while len(_lowercase ) < min_length:
SCREAMING_SNAKE_CASE_ = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_ = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_lowercase, clean_up_tokenization_spaces=_lowercase )
if " " not in output_txt and len(_lowercase ) > 1:
SCREAMING_SNAKE_CASE_ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_lowercase )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_ = ' ' + output_txt
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_lowercase, add_special_tokens=_lowercase )
return output_txt, output_ids
def a__ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
SCREAMING_SNAKE_CASE_ = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'] )
def a__ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = 'Unicode €.'
SCREAMING_SNAKE_CASE_ = tokenizer(_lowercase )
SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'], _lowercase )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase, 'Unicode €.</s>' )
SCREAMING_SNAKE_CASE_ = tokenizer('e è é ê ë' )
SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'], _lowercase )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase, 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), 'e è é ê ë</s>' )
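# Context for the hard-coded ids above, assuming `ByTaTokenizer` corresponds
# to the upstream `ByT5Tokenizer`: the tokenizer works on raw UTF-8 bytes and
# shifts each byte value by 3 to leave room for pad=0, eos=1 and unk=2, so
# "U" (byte 85) becomes id 88 and the trailing 1 is the appended </s>:
#
#     tok = ByTaTokenizer.from_pretrained('google/byt5-small')
#     assert tok('U')['input_ids'] == [ord('U') + 3, 1]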
def a__ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_lowercase, padding=_lowercase, return_tensors=_lowercase )
self.assertIsInstance(_lowercase, _lowercase )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowercase, _lowercase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def a__ ( self ) -> int:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
SCREAMING_SNAKE_CASE_ = tokenizer(_lowercase, padding=_lowercase, return_tensors=_lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', _lowercase )
self.assertIn('attention_mask', _lowercase )
self.assertNotIn('decoder_input_ids', _lowercase )
self.assertNotIn('decoder_attention_mask', _lowercase )
def a__ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = [
'Summary of the text.',
'Another summary.',
]
SCREAMING_SNAKE_CASE_ = tokenizer(
text_target=_lowercase, max_length=32, padding='max_length', truncation=_lowercase, return_tensors=_lowercase )
self.assertEqual(32, targets['input_ids'].shape[1] )
def a__ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['A long paragraph for summarization. </s>']
SCREAMING_SNAKE_CASE_ = ['Summary of the text. </s>']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_lowercase, text_target=_lowercase )
self.assertEqual(_lowercase, batch['input_ids'][0] )
self.assertEqual(_lowercase, batch['labels'][0] )
def a__ ( self ) -> Dict:
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ' He is very happy, UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_lowercase, add_special_tokens=_lowercase )
tokenizer.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_lowercase, add_special_tokens=_lowercase )
self.assertListEqual(_lowercase, _lowercase )
shutil.rmtree(_lowercase )
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_lowercase, add_special_tokens=_lowercase )
tokenizer.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_lowercase, add_special_tokens=_lowercase )
self.assertListEqual(_lowercase, _lowercase )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_lowercase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_lowercase )
def a__ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowercase )
with open(os.path.join(_lowercase, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_lowercase )
with open(os.path.join(_lowercase, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_lowercase )
SCREAMING_SNAKE_CASE_ = [f"""<extra_id_{i}>""" for i in range(125 )]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'an_additional_special_token'
]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_lowercase, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(_lowercase, _lowercase )
with open(os.path.join(_lowercase, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(_lowercase, _lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_lowercase, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=_lowercase )]
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_lowercase, additional_special_tokens=_lowercase, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(_lowercase )
self.assertTrue(tokenizer.decode([255] ) == '' )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Tuple:
pass
def a__ ( self ) -> Dict:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=_lowercase, do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE_ = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(_lowercase )
self.assertIsInstance(_lowercase, _lowercase )
def a__ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE_ = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
_lowercase, skip_special_tokens=_lowercase )
for attr in attributes_list:
setattr(_lowercase, attr + '_id', _lowercase )
self.assertEqual(getattr(_lowercase, _lowercase ), _lowercase )
self.assertEqual(getattr(_lowercase, attr + '_id' ), _lowercase )
setattr(_lowercase, attr + '_id', _lowercase )
self.assertEqual(getattr(_lowercase, _lowercase ), _lowercase )
self.assertEqual(getattr(_lowercase, attr + '_id' ), _lowercase )
setattr(_lowercase, 'additional_special_tokens_ids', [] )
self.assertListEqual(getattr(_lowercase, 'additional_special_tokens' ), [] )
self.assertListEqual(getattr(_lowercase, 'additional_special_tokens_ids' ), [] )
setattr(_lowercase, 'additional_special_tokens_ids', [token_id_to_test_setters] )
self.assertListEqual(getattr(_lowercase, 'additional_special_tokens' ), [token_to_test_setters] )
self.assertListEqual(getattr(_lowercase, 'additional_special_tokens_ids' ), [token_id_to_test_setters] )
# ---------------------------------------------------------------------------
'''Compute the net present value (NPV) of a series of yearly cash flows.'''
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
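# Worked example (values chosen arbitrarily): an initial outlay of -1000
# followed by inflows of 500, 500 and 500 at a 10% discount rate gives
#     NPV = -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 ≈ 243.43
#
#     print(net_present_value(0.10, [-1000, 500, 500, 500]))   # 243.43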
# ---------------------------------------------------------------------------
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCAmelCase__ ( UpperCamelCase_ : List[str] )-> List[Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def lowerCAmelCase__ ( UpperCamelCase_ : str )-> Optional[int]:
# word like '180' or '身高' or '神'
for char in word:
A__ = ord(UpperCamelCase_ )
if not _is_chinese_char(UpperCamelCase_ ):
return 0
return 1
def lowerCAmelCase__ ( UpperCamelCase_ : List[str] )-> Union[str, Any]:
A__ = set()
for token in tokens:
A__ = len(UpperCamelCase_ ) > 1 and is_chinese(UpperCamelCase_ )
if chinese_word:
word_set.add(UpperCamelCase_ )
A__ = list(UpperCamelCase_ )
return word_list
def lowerCAmelCase__ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : set() )-> Dict:
if not chinese_word_set:
return bert_tokens
A__ = max([len(UpperCamelCase_ ) for w in chinese_word_set] )
A__ = bert_tokens
A__ , A__ = 0, len(UpperCamelCase_ )
while start < end:
A__ = True
if is_chinese(bert_word[start] ):
A__ = min(end - start , UpperCamelCase_ )
for i in range(UpperCamelCase_ , 1 , -1 ):
A__ = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
A__ = '''##''' + bert_word[j]
A__ = start + i
A__ = False
break
if single_word:
start += 1
return bert_word
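# Hypothetical illustration of the function above: given the BERT tokens
# ["北", "京", "人"] and the LTP-derived word set {"北京"}, the longest
# match "北京" starts at "北", so the continuation piece is rewritten and
# the result is ["北", "##京", "人"]. Pieces outside any segmented word
# (here "人") are left untouched.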
def lowerCAmelCase__ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : LTP , UpperCamelCase_ : BertTokenizer )-> Optional[int]:
A__ = []
for i in range(0 , len(UpperCamelCase_ ) , 1_0_0 ):
A__ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['''cws'''] ).cws
A__ = [get_chinese_word(UpperCamelCase_ ) for r in res]
ltp_res.extend(UpperCamelCase_ )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
A__ = []
for i in range(0 , len(UpperCamelCase_ ) , 1_0_0 ):
A__ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=5_1_2 )
bert_res.extend(res['''input_ids'''] )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
A__ = []
for input_ids, chinese_word in zip(UpperCamelCase_ , UpperCamelCase_ ):
A__ = []
for id in input_ids:
A__ = bert_tokenizer._convert_id_to_token(UpperCamelCase_ )
input_tokens.append(UpperCamelCase_ )
A__ = add_sub_symbol(UpperCamelCase_ , UpperCamelCase_ )
A__ = []
# We only save the positions of Chinese sub-words that start with ##, i.e. pieces that continue a whole word.
for i, token in enumerate(UpperCamelCase_ ):
if token[:2] == "##":
A__ = token[2:]
# save chinese tokens' pos
if len(UpperCamelCase_ ) == 1 and _is_chinese_char(ord(UpperCamelCase_ ) ):
ref_id.append(UpperCamelCase_ )
ref_ids.append(UpperCamelCase_ )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
return ref_ids
def lowerCAmelCase__ ( UpperCamelCase_ : str )-> Optional[Any]:
# For Chinese (Ro)BERT(a), the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# To fine-tune these models, we have to use the same word segmenter: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
A__ = f.readlines()
A__ = [line.strip() for line in data if len(UpperCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A__ = LTP(args.ltp ) # faster in GPU device
A__ = BertTokenizer.from_pretrained(args.bert )
A__ = prepare_ref(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
A__ = [json.dumps(UpperCamelCase_ ) + '''\n''' for ref in ref_ids]
f.writelines(UpperCamelCase_ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
_lowercase = parser.parse_args()
main(args)
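# Example invocation (the script name and all paths are placeholders). Each
# line of the output file holds a JSON list with the positions of the "##"
# continuation pieces of multi-character Chinese words, which whole-word
# masking later uses to mask all pieces of a word together:
#
#   python prepare_chinese_ref.py \
#       --file_name ./train.txt \
#       --ltp ./resources/ltp \
#       --bert bert-base-chinese \
#       --save_path ./ref.txt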
# ---------------------------------------------------------------------------
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_lowercase = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_lowercase = concatenate_datasets
_lowercase = DownloadConfig
_lowercase = DownloadManager
_lowercase = DownloadMode
_lowercase = DownloadConfig
_lowercase = DownloadMode
_lowercase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
# ---------------------------------------------------------------------------
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def __UpperCamelCase ( a : List[Any] , a : Optional[Any] , a : Optional[int] ) ->Union[str, Any]:
snake_case = UniSpeechSatForSequenceClassification.from_pretrained(a , config=a )
snake_case = downstream_dict['''projector.weight''']
snake_case = downstream_dict['''projector.bias''']
snake_case = downstream_dict['''model.post_net.linear.weight''']
snake_case = downstream_dict['''model.post_net.linear.bias''']
return model
def __UpperCamelCase ( a : Tuple , a : int , a : Optional[int] ) ->str:
snake_case = UniSpeechSatForAudioFrameClassification.from_pretrained(a , config=a )
snake_case = downstream_dict['''model.linear.weight''']
snake_case = downstream_dict['''model.linear.bias''']
return model
def __UpperCamelCase ( a : Optional[int] , a : Optional[Any] , a : Optional[int] ) ->Optional[Any]:
snake_case = UniSpeechSatForXVector.from_pretrained(a , config=a )
snake_case = downstream_dict['''connector.weight''']
snake_case = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
snake_case = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
snake_case = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def __UpperCamelCase ( a : Any , a : str , a : Any , a : Tuple ) ->List[Any]:
snake_case = torch.load(a , map_location='''cpu''' )
snake_case = checkpoint['''Downstream''']
snake_case = UniSpeechSatConfig.from_pretrained(a )
snake_case = WavaVecaFeatureExtractor.from_pretrained(
a , return_attention_mask=a , do_normalize=a )
snake_case = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
snake_case = convert_classification(a , a , a )
elif arch.endswith('''ForAudioFrameClassification''' ):
snake_case = convert_diarization(a , a , a )
elif arch.endswith('''ForXVector''' ):
snake_case = convert_xvector(a , a , a )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
snake_case = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(a )
hf_model.save_pretrained(a )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
_lowercase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
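# Example invocation (the script name and file names are placeholders, not
# real checkpoints):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model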
# ---------------------------------------------------------------------------
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase = logging.get_logger(__name__)
class _lowercase ( __a ):
def __init__( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]:
snake_case = feature_size
snake_case = sampling_rate
snake_case = padding_value
snake_case = kwargs.pop('''padding_side''' , '''right''' )
snake_case = kwargs.pop('''return_attention_mask''' , A__ )
super().__init__(**A__ )
def UpperCamelCase ( self , A__ , A__ = True , A__ = None , A__ = False , A__ = None , A__ = None , A__ = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
snake_case = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
snake_case = processed_features[self.model_input_names[0]]
snake_case = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A__ ) == 0:
if return_attention_mask:
snake_case = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
snake_case = required_input[0]
if isinstance(A__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
snake_case = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A__ ):
snake_case = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A__ ):
snake_case = '''tf'''
elif is_torch_tensor(A__ ):
snake_case = '''pt'''
elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ):
snake_case = '''np'''
else:
raise ValueError(
F"""type of {first_element} unknown: {type(A__ )}. """
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
snake_case = to_numpy(A__ )
else:
snake_case = [to_numpy(A__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
snake_case = self._get_padding_strategies(padding=A__ , max_length=A__ )
snake_case = processed_features[self.model_input_names[0]]
snake_case = len(A__ )
if not all(len(A__ ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
snake_case = []
for i in range(A__ ):
snake_case = {k: v[i] for k, v in processed_features.items()}
# truncation
snake_case = self._truncate(
A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , )
truncated_inputs.append(A__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
snake_case = PaddingStrategy.MAX_LENGTH
snake_case = {}
for i in range(A__ ):
# padding
snake_case = self._pad(
truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , )
for key, value in outputs.items():
if key not in batch_outputs:
snake_case = []
if value.dtype is np.dtype(np.floataa ):
snake_case = value.astype(np.floataa )
batch_outputs[key].append(A__ )
return BatchFeature(A__ , tensor_type=A__ )
def UpperCamelCase ( self , A__ , A__ = None , A__ = PaddingStrategy.DO_NOT_PAD , A__ = None , A__ = None , ) -> dict:
snake_case = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
snake_case = len(A__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
snake_case = np.ones(len(A__ ) , dtype=np.intaa )
if needs_to_be_padded:
snake_case = max_length - len(A__ )
if self.padding_side == "right":
if return_attention_mask:
snake_case = np.pad(
processed_features['''attention_mask'''] , (0, difference) )
snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
snake_case = np.pad(
A__ , A__ , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
snake_case = np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
snake_case = np.pad(
A__ , A__ , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , ) -> Union[str, Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
snake_case = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
snake_case = len(A__ ) > max_length
if needs_to_be_truncated:
snake_case = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
snake_case = processed_features['''attention_mask'''][:max_length]
return processed_features
def UpperCamelCase ( self , A__=False , A__=None ) -> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A__ , A__ ):
snake_case = PaddingStrategy(A__ )
elif isinstance(A__ , A__ ):
snake_case = padding
else:
snake_case = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
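# Minimal padding sketch, assuming a concrete subclass such as
# `Wav2Vec2FeatureExtractor` (the class above is a base class and is not
# used directly in practice):
#
#     import numpy as np
#     from transformers import Wav2Vec2FeatureExtractor
#
#     fe = Wav2Vec2FeatureExtractor(
#         feature_size=1, sampling_rate=16000, padding_value=0.0)
#     batch = fe([np.zeros(8), np.zeros(5)], sampling_rate=16000,
#                padding=True, return_tensors='np')
#     print(batch['input_values'].shape)   # (2, 8): shorter clip was padded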
# ---------------------------------------------------------------------------
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
_UpperCamelCase = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
_UpperCamelCase = '''main'''
# Default branch name
_UpperCamelCase = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
_UpperCamelCase = '''aaaaaaa'''
# This commit does not exist, so we should 404.
_UpperCamelCase = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
_UpperCamelCase = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
@contextlib.contextmanager
def UpperCamelCase_( ) -> Dict:
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def UpperCamelCase_( ) -> Optional[int]:
print('Bonjour!' )
yield
print('Au revoir!' )
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCamelCase__ (self , __a ) -> Dict:
"""simple docstring"""
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCamelCase__ (self , __a ) -> str:
"""simple docstring"""
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCamelCase__ (self , __a ) -> Optional[int]:
"""simple docstring"""
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
self.assertEqual(find_labels(__a ) , ['labels'] )
self.assertEqual(find_labels(__a ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__a ) , ['start_positions', 'end_positions'] )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
pass
self.assertEqual(find_labels(__a ) , ['labels'] )
@require_tf
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
self.assertEqual(find_labels(__a ) , ['labels'] )
self.assertEqual(find_labels(__a ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__a ) , ['start_positions', 'end_positions'] )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
pass
self.assertEqual(find_labels(__a ) , ['labels'] )
@require_flax
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
self.assertEqual(find_labels(__a ) , [] )
self.assertEqual(find_labels(__a ) , [] )
self.assertEqual(find_labels(__a ) , [] )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
pass
self.assertEqual(find_labels(__a ) , [] )
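# Context note: `find_labels` inspects the signature of a model class's
# forward pass and returns the argument names used as training labels, which
# is why question-answering models report ["start_positions",
# "end_positions"] and the Flax classes, whose `__call__` takes no label
# arguments, report [].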
# ---------------------------------------------------------------------------
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizer
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ = XLMRobertaTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = '<pad>'
UpperCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(__a ) , 1002 )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = XLMRobertaTokenizer(__a , keep_accents=__a )
UpperCAmelCase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(__a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
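# Context note: `fairseq_offset` is 1 because fairseq prepends "<s>"=0,
# "<pad>"=1, "</s>"=2, "<unk>"=3 to its vocabulary while SentencePiece
# reserves only three specials, so every raw SentencePiece id is shifted by
# one when mapped into the XLM-R vocabulary.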
UpperCAmelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase__ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase__ = self.tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = tokenizer_r.save_pretrained(__a )
UpperCAmelCase__ = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
UpperCAmelCase__ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
UpperCAmelCase__ = tokenizer_r.from_pretrained(__a )
UpperCAmelCase__ = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = tokenizer_r.save_pretrained(__a , legacy_format=__a )
UpperCAmelCase__ = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
UpperCAmelCase__ = tokenizer_r.from_pretrained(__a )
UpperCAmelCase__ = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = tokenizer_r.save_pretrained(__a , legacy_format=__a )
UpperCAmelCase__ = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ = tokenizer_r.from_pretrained(__a )
UpperCAmelCase__ = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@cached_property
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__a , f.name )
UpperCAmelCase__ = XLMRobertaTokenizer(f.name , keep_accents=__a )
UpperCAmelCase__ = pickle.dumps(__a )
pickle.loads(__a )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = 'I was born in 92000, and this is falsé.'
UpperCAmelCase__ = tokenizer.tokenize(__a )
UpperCAmelCase__ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase__ = tokenizer.encode(__a , add_special_tokens=__a )
UpperCAmelCase__ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = tokenizer.encode(__a )
UpperCAmelCase__ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
@slow
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = 'Hello World!'
UpperCAmelCase__ = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
UpperCAmelCase__ = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
# ---------------------------------------------------------------------------
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    '''simple docstring'''

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    '''simple docstring'''

    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]
    ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    '''simple docstring'''
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_with_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    '''simple docstring'''
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy (not alias) so the next iteration still reads the finished row
        next_row = current_row.copy()

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
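    # Added sanity check: the four implementations above should agree on any
    # input; `sample` is an illustrative hand-built matrix (largest square side: 2).
    sample = [[1, 0, 1], [1, 1, 1], [0, 1, 1]]
    assert (
        largest_square_area_in_matrix_top_down(3, 3, sample)
        == largest_square_area_in_matrix_top_down_with_dp(3, 3, sample)
        == largest_square_area_in_matrix_bottom_up(3, 3, sample)
        == largest_square_area_in_matrix_bottom_up_with_space_optimization(3, 3, sample)
        == 2
    )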
| 692
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    '''simple docstring'''
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
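    # Added usage sketch on the graphs defined above: the shortest E -> F path
    # is E -> G -> F with cost 2 + 1 = 3 (the alternative E -> B -> C -> D -> F costs 4).
    assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3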
| 692
| 1
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    """simple docstring"""

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]):
        """simple docstring"""
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self):
        """simple docstring"""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T):
        """simple docstring"""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int):  # noqa: E741
        """simple docstring"""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments():
        """simple docstring"""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
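    # Added sanity check: after the point updates above, whole-range queries
    # must match the plain built-ins on the mutated array.
    assert min_segment_tree.query(0, len(test_array) - 1) == min(test_array)
    assert max_segment_tree.query(0, len(test_array) - 1) == max(test_array)
    assert sum_segment_tree.query(0, len(test_array) - 1) == sum(test_array)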
| 74
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
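# Added usage sketch (not part of the original module): the `attribute_map`
# above lets generic names alias the GPT-specific hyperparameters.
#
#     config = OpenAIGPTConfig()
#     assert config.hidden_size == config.n_embd == 768
#     assert config.num_hidden_layers == config.n_layer == 12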
| 630
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
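# Added usage sketch (not part of the original module). The checkpoint name is
# an assumption and `image` stands for any PIL image:
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
#     # -> input_ids / attention_mask from the tokenizer plus pixel_values
#     #    from the image processor, merged as shown in __call__ above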
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
    _import_structure["modeling_maskformer_swin"] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
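# Added note (sketch, not part of the original module): the _LazyModule set up
# above defers the heavy torch/vision imports until an attribute is touched:
#
#     from transformers import MaskFormerConfig   # cheap, config only
#     from transformers import MaskFormerModel    # first access triggers the torch branch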
| 50
| 0
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        """simple docstring"""
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """simple docstring"""
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """simple docstring"""
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """simple docstring"""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        """simple docstring"""
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """simple docstring"""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
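# Added usage sketch (assumption: wired up the way the library's per-model test
# suites use this helper; `BertConfig` is only an example config class):
#
#     class BertConfigTest(unittest.TestCase):
#         def setUp(self):
#             self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#         def test_config(self):
#             self.config_tester.run_common_tests()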
| 154
|
def text_justification(word: str, max_width: int) -> list:
    """simple docstring"""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
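    # Added demo with an illustrative input: every returned line is padded or
    # space-distributed to exactly 16 characters.
    for justified_line in text_justification("This is an example of text justification.", 16):
        print(repr(justified_line))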
| 154
| 1
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __a , atol=1E-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __a , atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowerCAmelCase_ = outputs.loss
assert torch.allclose(__a , __a , atol=1E-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__a )
model.save_pretrained(__a )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__a , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
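# Added illustration (standalone sketch, not part of the conversion script):
# the qkv handling in convert_state_dict slices a fused (3 * dim, dim)
# projection into equal query/key/value blocks, exactly like this:
#
#     import torch
#     dim = 4
#     qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#     q, k, v = qkv[:dim, :], qkv[dim : 2 * dim, :], qkv[-dim:, :]
#     assert torch.equal(torch.cat([q, k, v]), qkv)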
| 226
|
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
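    # Added sanity check: the three implementations must agree, including on
    # zero and negative inputs (illustrative values).
    for n in (0, 7, -7, 12345, -98765):
        assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)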
| 226
| 1
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    '''simple docstring'''
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
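# Added illustration (standalone sketch, not part of the conversion script):
# the attribute walk above descends a dotted checkpoint key one segment at a
# time, indexing into module lists for digits and using getattr otherwise.
#
#     def resolve(module, dotted_key):
#         for part in dotted_key.split("."):
#             module = module[int(part)] if part.isdigit() else getattr(module, part)
#         return module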
| 389
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''

        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        '''simple docstring'''
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
    def test_small_model_tf(self):
        '''simple docstring'''
        pass
@require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
@require_torch
@slow
    def test_large_model_pt(self):
        '''simple docstring'''
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] ,)
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] ,)
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        '''simple docstring'''
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] ,)
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] ,)
@require_torch
@slow
    def test_threshold(self):
        '''simple docstring'''
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] ,)
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        '''simple docstring'''
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
] ,)
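# Added usage sketch (not part of the test file): driving the same pipeline the
# slow tests exercise, with the same public checkpoint:
#
#     from transformers import pipeline
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # each entry: {"score": float, "label": str,
#     #              "box": {"xmin": int, "ymin": int, "xmax": int, "ymax": int}}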
| 502
| 0
|
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
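    # Added examples (illustrative): these mirror Python's own shift operators
    # on small inputs; the arithmetic shift replicates the sign bit.
    assert logical_left_shift(1, 1) == "0b10"          # 1 << 1 == 2
    assert logical_right_shift(8, 2) == "0b10"         # 8 >> 2 == 2
    assert arithmetic_right_shift(-8, 2) == "0b11110"  # -8 >> 2 == -2 in 5-bit two's complement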
| 179
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 179
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
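# Hypothetical invocation of the conversion script above (script filename is an
# assumption; a CUDA device is required because the script moves the model to
# "cuda" for the logit checks):
#
#   python convert_sam_original_to_hf.py \
#       --model_name sam_vit_h_4b8939 \
#       --pytorch_dump_folder_path ./sam-vit-huge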
| 507
| 0
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
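# The per-element bbox-legalisation loop in prepare_config_and_inputs above can
# also be written vectorised; a self-contained sketch (assumes only `torch`):
import torch

def legalize_bbox(bbox: torch.Tensor) -> torch.Tensor:
    # sort each (x0, x1) and (y0, y1) pair so coordinates are non-decreasing
    x = torch.sort(bbox[..., [0, 2]], dim=-1).values
    y = torch.sort(bbox[..., [1, 3]], dim=-1).values
    return torch.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], dim=-1)

# e.g. legalize_bbox(torch.tensor([[[4, 8, 1, 2]]])) -> tensor([[[1, 2, 4, 8]]])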
| 37
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase_ = """<<<<<<< This should probably be modified because it mentions: """
lowercase_ = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder." )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__ = f_name.replace(".py", "" )
lowercase__ = os.path.join(__lowercase, __lowercase )
lowercase__ = os.path.join(__lowercase, __lowercase )
os.makedirs(__lowercase, exist_ok=__lowercase )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__lowercase )
if needs_manual_update:
with_manual_update.append(__lowercase )
with open(__lowercase, "w", encoding="utf-8" ) as f:
f.writelines(__lowercase )
self._logger.info(F'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
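# A quick standalone check of the TO_CONVERT substitution idea on one line
# (same re.sub mechanics as the converter above, using two of its patterns):
import re

sample = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
for pattern, replacement in [
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
]:
    sample = re.sub(pattern, replacement, sample)
print(sample)  # features=datasets.Features({'text': datasets.Value('string')})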
| 37
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32
@property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
        return model
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
@property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }

        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = 'cpu'

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'teddy bear playing in the pool'

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = 'cpu'

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'teddy bear playing in the pool'

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')

        pipeline = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            'teddy bear playing in the pool', num_images_per_prompt=1, generator=generator, output_type='np', )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
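# End-user sketch of the pipeline exercised by the integration test above
# (commented out because it downloads the microsoft/vq-diffusion-ithq weights
# and needs a GPU):
#
#   from diffusers import VQDiffusionPipeline
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
#   image = pipe("teddy bear playing in the pool", num_images_per_prompt=1).images[0]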
| 101
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding='VALID', groups=groups, use_bias=False, name='convolution', )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name='embedder', )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name='convolution')
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='relu', name='attention.0'),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='sigmoid', name='attention.2'),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.2'),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name='layer.2'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.3'),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(self, config, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name='layers.0'),
            *[layer(config, out_channels, out_channels, name=f'''layers.{i+1}''') for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name='stages.0', ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f'''stages.{i+1}'''))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='embedder')
        self.encoder = TFRegNetEncoder(config, name='encoder')
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name='regnet')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name='regnet')
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name='classifier.1') if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
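# The NCHW -> NHWC transpose used by TFRegNetEmbeddings above, in isolation
# (tf.keras Conv2D layers on CPU only accept channels-last input):
import tensorflow as tf

pixel_values = tf.random.uniform((1, 3, 224, 224))              # NCHW, as the HF API expects
channels_last = tf.transpose(pixel_values, perm=(0, 2, 3, 1))   # -> shape (1, 224, 224, 3)
assert channels_last.shape == (1, 224, 224, 3)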
| 558
| 0
|
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
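# Worked check on the classic CLRS instance used in main(): the minimum number
# of scalar multiplications for dimensions [30, 35, 15, 5, 10, 20, 25] is 15125.
#
#   matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
#   assert matrix[1][6] == 15125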
| 17
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
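# Hypothetical usage sketch (the constructor and insert_data signatures are
# assumed from the attributes used above, not verified against hash_table.py):
#
#   ht = HashTableWithLinkedList(size_table=3, charge_factor=2)
#   for value in (17, 18, 99, 26, 75):
#       ht.insert_data(value)   # colliding slots grow a deque instead of rehashing immediately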
| 17
| 1
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__(self, activation_dropout=0.1, activation_function="gelu", vocab_size=30522, hidden_size=1024, encoder_ffn_dim=4096, num_encoder_layers=12, num_encoder_attention_heads=16, decoder_ffn_dim=4096, num_decoder_layers=12, num_decoder_attention_heads=16, attention_dropout=0.1, dropout=0.1, max_position_embeddings=512, init_std=0.02, is_encoder_decoder=True, add_cross_attention=True, decoder_start_token_id=0, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, eps=0.0, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, )
    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.''' )
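# Construction sketch (defaults as above; `num_attention_heads` is an
# attribute-mapped alias, so it resolves to num_encoder_attention_heads):
#
#   config = XLMProphetNetConfig(ngram=2, num_buckets=32)
#   assert config.num_attention_heads == config.num_encoder_attention_heads == 16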
| 434
|
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="train" ) ->List[str]:
return calculate_hypothesis_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - output(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Tuple:
_lowerCamelCase : int = 0
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[Any]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=m ) ->List[str]:
_lowerCamelCase : Tuple = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if index == -1:
summation_value += _error(SCREAMING_SNAKE_CASE_ )
else:
summation_value += _error(SCREAMING_SNAKE_CASE_ ) * train_data[i][0][index]
return summation_value
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->List[str]:
_lowerCamelCase : Optional[Any] = summation_of_cost_derivative(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) / m
return cost_derivative_value
def UpperCamelCase ( ) ->Optional[Any]:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
_lowerCamelCase : Dict = 0.000002
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Union[str, Any] = 0
while True:
j += 1
_lowerCamelCase : str = [0, 0, 0, 0]
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
_lowerCamelCase : Optional[int] = get_cost_derivative(i - 1 )
_lowerCamelCase : Optional[Any] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ , rtol=SCREAMING_SNAKE_CASE_ , ):
break
_lowerCamelCase : List[str] = temp_parameter_vector
print(('''Number of iterations:''', j) )
def UpperCamelCase ( ) ->Optional[Any]:
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
print(('''Actual output value:''', output(SCREAMING_SNAKE_CASE_ , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(SCREAMING_SNAKE_CASE_ , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
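# The model fitted above is the affine hypothesis
#   h(x) = p0 + p1*x1 + p2*x2 + p3*x3,
# and run_gradient_descent() applies batch updates p_i <- p_i - LEARNING_RATE * dJ/dp_i
# until numpy.allclose reports convergence; test_gradient_descent() then prints the
# hypothesis value next to each held-out target (555 and 150 here).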
| 434
| 1
|
import os
def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_directory, '''triangle.txt''')

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' '''):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
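# A file-free variant of the same bottom-up reduction, handy for a quick test
# (toy triangle; the best path 3 -> 7 -> 4 -> 9 totals 23):
def max_path_total(triangle):
    a = [row[:] for row in triangle]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            right = a[i - 1][j] if j != len(a[i - 1]) else 0
            left = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(left, right)
    return max(a[-1])


assert max_path_total([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23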
| 186
|
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError('''Input Error: Molar mass values must be greater than 0.''')
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''')
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''')
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''')
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''')
    )
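# Numeric check of Graham's law with H2 (2.016 g/mol) and O2 (31.998 g/mol);
# the lighter gas effuses roughly 3.98x faster:
#
#   print(effusion_ratio(2.016, 31.998))   # ~= 3.98397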
| 186
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
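# Round-trip sketch (downloads the 20B tokenizer files on first use):
#
#   tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tok("hello world")["input_ids"]
#   assert tok.decode(ids) == "hello world"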
| 488
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False , )-> str:
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.Embedding(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = nn.Embedding(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = False
UpperCamelCase = nn.Dropout(p=UpperCAmelCase_ )
UpperCamelCase = TaConfig(
vocab_size=UpperCAmelCase_ , d_model=UpperCAmelCase_ , num_heads=UpperCAmelCase_ , d_kv=UpperCAmelCase_ , d_ff=UpperCAmelCase_ , dropout_rate=UpperCAmelCase_ , feed_forward_proj=UpperCAmelCase_ , is_decoder=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , )
UpperCamelCase = nn.ModuleList()
for lyr_num in range(UpperCAmelCase_ ):
UpperCamelCase = TaBlock(UpperCAmelCase_ )
self.encoders.append(UpperCAmelCase_ )
UpperCamelCase = TaLayerNorm(UpperCAmelCase_ )
UpperCamelCase = nn.Dropout(p=UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str )-> List[Any]:
"""simple docstring"""
UpperCamelCase = self.token_embedder(UpperCAmelCase_ )
UpperCamelCase = encoder_input_tokens.shape[1]
UpperCamelCase = torch.arange(UpperCAmelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCAmelCase_ )
UpperCamelCase = self.dropout_pre(UpperCAmelCase_ )
# inverted the attention mask
UpperCamelCase = encoder_input_tokens.size()
UpperCamelCase = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ )
for lyr in self.encoders:
UpperCamelCase = lyr(UpperCAmelCase_ , UpperCAmelCase_ )[0]
UpperCamelCase = self.layer_norm(UpperCAmelCase_ )
return self.dropout_post(UpperCAmelCase_ ), encoder_inputs_mask
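# Self-contained sketch of what `get_extended_attention_mask` (from
# ModuleUtilsMixin) does for the forward pass above: broadcast a (batch, seq)
# padding mask to (batch, 1, 1, seq) and turn zeros into a large negative
# additive bias that kills the masked positions before the softmax.
def _demo_extended_attention_mask(mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    extended = mask[:, None, None, :].to(dtype)       # broadcast over heads and query positions
    return (1.0 - extended) * torch.finfo(dtype).min  # 1 -> 0.0, 0 -> very negative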
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                } ), reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ], )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
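if __name__ == "__main__":
    # Standalone sanity check (added sketch): the metric is a thin wrapper over
    # scikit-learn, so calling matthews_corrcoef directly reproduces the first
    # two docstring examples above.
    references = [1, 3, 2, 0, 3, 2]
    predictions = [1, 2, 2, 0, 3, 3]
    print(round(matthews_corrcoef(references, predictions), 2))  # 0.54
    print(round(matthews_corrcoef(references, predictions, sample_weight=[0.5, 3, 1, 1, 1, 2]), 2))  # 0.1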
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
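# Added sketch (not part of the original file): the `_LazyModule` above defers
# the heavy torch import until an attribute is first touched. A simplified
# stand-in for the same idea uses PEP 562 module-level `__getattr__`; the real
# `_LazyModule` additionally walks the `_import_structure` map and submodules.
#
#     import importlib
#
#     _LAZY_ATTRS = {"AutoformerConfig": ".configuration_autoformer"}
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:
#             module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")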
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    r"""Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single processor for document inputs."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        # verify inputs: with apply_ocr the image processor supplies words/boxes itself
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map every produced chunk back to the image it came from
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
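if __name__ == "__main__":
    # Toy illustration (added sketch) of the overflow mapping implemented in
    # `get_overflowing_images` above: each chunk produced by the tokenizer is
    # matched back to the image it came from.
    images = ["img0", "img1"]
    overflow_to_sample_mapping = [0, 1, 1]  # the second image yielded two chunks
    print([images[sample_idx] for sample_idx in overflow_to_sample_mapping])
    # ['img0', 'img1', 'img1']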
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''RUCAIBox/mvp''': 1_024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast MVP tokenizer (backed by HuggingFace's `tokenizers` library), derived from the GPT-2 byte-level BPE tokenizer."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
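if __name__ == "__main__":
    # Toy check (added sketch) of the special-token layout produced by
    # `build_inputs_with_special_tokens` for a pair: `<s> A </s> </s> B </s>`.
    # bos=0 / eos=2 are the usual BART-family ids, assumed here for display.
    bos, eos = 0, 2
    token_ids_0, token_ids_1 = [10, 11], [20]
    output = [bos] + token_ids_0 + [eos]
    output = output + [eos] + token_ids_1 + [eos]
    print(output)             # [0, 10, 11, 2, 2, 20, 2]
    print([0] * len(output))  # token type ids are all zeros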
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between predicted and actual output for the given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)
def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: bias term plus the weighted sum of the features."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum of error terms, weighted by the feature at `index` (-1 means the bias)."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    return summation_of_cost_derivative(index, m) / m
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
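    # Vectorized reference (added sketch, numpy only): the same batch update on
    # a bias-augmented design matrix. With the original data and learning rate
    # it should settle near the weights found by run_gradient_descent().
    X = numpy.array([x for x, _ in train_data], dtype=float)
    X = numpy.hstack([numpy.ones((X.shape[0], 1)), X])
    y = numpy.array([t for _, t in train_data], dtype=float)
    theta = numpy.zeros(X.shape[1])
    for _ in range(100_000):
        theta -= LEARNING_RATE * (X.T @ (X @ theta - y)) / len(y)
    print(("Vectorized parameters:", theta))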
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32", device="cuda:0" if torch.cuda.is_available() else "cpu", enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None, )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split the fused qkv projection into separate query, key and value tensors
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
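# Shape check (added sketch, torch only): the qkv branch in `rename_state_dict`
# slices a fused (3 * hidden, hidden) projection into three equal parts.
def _demo_qkv_split():
    hidden = 4
    mixed_qkv = torch.randn(3 * hidden, hidden)
    qkv_dim = mixed_qkv.size(0) // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : qkv_dim * 2]
    value = mixed_qkv[qkv_dim * 2 :]
    assert query.shape == key.shape == value.shape == (hidden, hidden)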
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Count the ordered combinations of elements of `array` that sum to `target`
    (plain recursion, exponential time).
    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Same count, memoized top-down with an explicit dp array.
    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """
    Same count, bottom-up in O(n * target) time.
    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
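    # Equivalent top-down memoization (added sketch): the same recurrence with
    # functools.lru_cache standing in for the explicit dp array.
    from functools import lru_cache
    @lru_cache(maxsize=None)
    def count_ordered_sums(remaining: int) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        return sum(count_ordered_sums(remaining - item) for item in array)
    print(count_ordered_sums(target))  # 9, matching combination_sum_iv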
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")
t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
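# Illustration (added sketch): `ratio_char_token` is characters per token, a
# rough measure of how well the tokenizer compresses source code. With a
# made-up token count (no tokenizer needed):
demo_content = "def add(a, b):\n    return a + b\n"
demo_input_ids = list(range(12))  # pretend the tokenizer produced 12 tokens
print(f"demo ratio: {len(demo_content) / len(demo_input_ids):.2f} characters per token")  # 2.67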
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and save it back."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
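# Round-trip check (added sketch): halving a throwaway state dict end to end.
def _demo_convert():
    dummy = {"weight": torch.ones(2, 2, dtype=torch.float32)}
    torch.save(dummy, "dummy_model.bin")
    convert("dummy_model.bin", save_path="dummy_model_fp16.bin")
    assert torch.load("dummy_model_fp16.bin")["weight"].dtype == torch.float16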
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(F'''Started running {mod.modelId}!!!''')
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
    """Log metrics and save them as `{split}_results.json` in `output_dir`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
    # use task specific params
    use_task_specific_params(model, data_args.task)
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    # Key templates below follow the layer mapping established in `rename_key`
    # (resblocks -> encoder.layers) and the HF q/k/v projection naming; the
    # exact target strings are a reconstruction.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video(num_frames):
    """Download the short cooking video (as frames) used to verify the conversion."""
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert the original X-CLIP weights for `model_name` into the HF XCLIPModel layout."""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
_SCREAMING_SNAKE_CASE = model_to_url[model_name]
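# Infer the number of input frames from the model name: 8 by default, 16 for "16-frames" variants, 32 for the few-shot checkpoints.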
_SCREAMING_SNAKE_CASE = 8
if "16-frames" in model_name:
_SCREAMING_SNAKE_CASE = 16
elif "shot" in model_name:
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
model.eval()
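# Checkpoints hosted on Google Drive have to be fetched with gdown; the GitHub release files can go through torch.hub.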
if "drive" in checkpoint_url:
_SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""]
else:
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""]
_SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
_SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24
_SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
_SCREAMING_SNAKE_CASE = outputs.logits_per_video
_SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 )
print("""Probs:""" , SCREAMING_SNAKE_CASE_ )
# kinetics-400
if model_name == "xclip-base-patch32":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0
| 0
|
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
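# Each test class below exercises one UNet block type and compares its output slice against recorded reference values.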
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = DownBlockaD # noqa F405
lowercase__ = "down"
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ResnetDownsampleBlockaD # noqa F405
lowercase__ = "down"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : List[str] = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = AttnDownBlockaD # noqa F405
lowercase__ = "down"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : int = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = CrossAttnDownBlockaD # noqa F405
lowercase__ = "down"
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = super().prepare_init_args_and_inputs_for_common()
_snake_case : Optional[Any] = 32
return init_dict, inputs_dict
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : List[Any] = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowercase__ = "down"
@property
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = super().prepare_init_args_and_inputs_for_common()
_snake_case : List[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""", """MPS result is not consistent""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : List[str] = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = SkipDownBlockaD # noqa F405
lowercase__ = "down"
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = AttnSkipDownBlockaD # noqa F405
lowercase__ = "down"
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = DownEncoderBlockaD # noqa F405
lowercase__ = "down"
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_temb=a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
_snake_case : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = AttnDownEncoderBlockaD # noqa F405
lowercase__ = "down"
@property
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
return super().get_dummy_input(include_temb=a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[str] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
_snake_case : Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = UNetMidBlockaD # noqa F405
lowercase__ = "mid"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Union[str, Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
_snake_case : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = UNetMidBlockaDCrossAttn # noqa F405
lowercase__ = "mid"
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : str = super().prepare_init_args_and_inputs_for_common()
_snake_case : Optional[Any] = 32
return init_dict, inputs_dict
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowercase__ = "mid"
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = super().prepare_init_args_and_inputs_for_common()
_snake_case : Tuple = 32
return init_dict, inputs_dict
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = UpBlockaD # noqa F405
lowercase__ = "up"
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ResnetUpsampleBlockaD # noqa F405
lowercase__ = "up"
@property
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Union[str, Any] = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = CrossAttnUpBlockaD # noqa F405
lowercase__ = "up"
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = super().prepare_init_args_and_inputs_for_common()
_snake_case : Any = 32
return init_dict, inputs_dict
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[str] = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowercase__ = "up"
@property
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=a_, include_encoder_hidden_states=a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = super().prepare_init_args_and_inputs_for_common()
_snake_case : str = 32
return init_dict, inputs_dict
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Dict = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = AttnUpBlockaD # noqa F405
lowercase__ = "up"
@property
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=a_ )
@unittest.skipIf(torch_device == """mps""", """MPS result is not consistent""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[Any] = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = SkipUpBlockaD # noqa F405
lowercase__ = "up"
@property
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = AttnSkipUpBlockaD # noqa F405
lowercase__ = "up"
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = UpDecoderBlockaD # noqa F405
lowercase__ = "up"
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return super().get_dummy_input(include_temb=a_ )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : str = {"""in_channels""": 32, """out_channels""": 32}
_snake_case : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(a_ )
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = AttnUpDecoderBlockaD # noqa F405
lowercase__ = "up"
@property
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_temb=a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : List[str] = {"""in_channels""": 32, """out_channels""": 32}
_snake_case : int = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Any = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
super().test_output(a_ )
| 609
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "open-llama"
def __init__( self: int, a_: List[str]=100_000, a_: List[str]=4_096, a_: int=11_008, a_: Tuple=32, a_: Any=32, a_: Optional[Any]="silu", a_: Any=2_048, a_: List[Any]=0.02, a_: int=1E-6, a_: Optional[int]=True, a_: List[str]=0, a_: Any=1, a_: Optional[int]=2, a_: Tuple=False, a_: List[Any]=True, a_: Optional[int]=0.1, a_: Tuple=0.1, a_: List[Any]=True, a_: Optional[int]=True, a_: Dict=None, **a_: int, ):
'''simple docstring'''
_snake_case : Any = vocab_size
_snake_case : Tuple = max_position_embeddings
_snake_case : str = hidden_size
_snake_case : Dict = intermediate_size
_snake_case : str = num_hidden_layers
_snake_case : int = num_attention_heads
_snake_case : Union[str, Any] = hidden_act
_snake_case : Dict = initializer_range
_snake_case : Tuple = rms_norm_eps
_snake_case : Dict = use_cache
_snake_case : Optional[int] = kwargs.pop(
"""use_memorry_efficient_attention""", a_ )
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : List[str] = attention_dropout_prob
_snake_case : Optional[int] = use_stable_embedding
_snake_case : int = shared_input_output_embedding
_snake_case : List[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, tie_word_embeddings=a_, **a_, )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
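# "rope_scaling" is optional; when present it must be a dict of the form {"type": "linear" | "dynamic", "factor": float > 1.0}.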
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, a_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"got {self.rope_scaling}" )
_snake_case : Optional[int] = self.rope_scaling.get("""type""", a_ )
_snake_case : Optional[int] = self.rope_scaling.get("""factor""", a_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(a_, a_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 609
| 1
|
"""simple docstring"""
_A = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_A = [{"type": "code", "content": INSTALL_CONTENT}]
_A = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 228
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : torch.FloatTensor
class __UpperCAmelCase ( snake_case__ , snake_case__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] , A_ : int = 32 , A_ : int = 64 , A_ : int = 20 , A_ : int = 7_68 , A_ : Dict=77 , A_ : Union[str, Any]=4 , A_ : float = 0.0 , A_ : str = "silu" , A_ : Optional[str] = None , A_ : Optional[str] = None , A_ : Optional[str] = "linear" , A_ : Optional[str] = "prd" , A_ : Optional[int] = None , A_ : Optional[int] = None , A_ : Optional[int] = None , )-> Optional[int]:
super().__init__()
__UpperCamelCase = num_attention_heads
__UpperCamelCase = attention_head_dim
__UpperCamelCase = num_attention_heads * attention_head_dim
__UpperCamelCase = additional_embeddings
__UpperCamelCase = time_embed_dim or inner_dim
__UpperCamelCase = embedding_proj_dim or embedding_dim
__UpperCamelCase = clip_embed_dim or embedding_dim
__UpperCamelCase = Timesteps(A_ , A_ , 0 )
__UpperCamelCase = TimestepEmbedding(A_ , A_ , out_dim=A_ , act_fn=A_ )
__UpperCamelCase = nn.Linear(A_ , A_ )
if embedding_proj_norm_type is None:
__UpperCamelCase = None
elif embedding_proj_norm_type == "layer":
__UpperCamelCase = nn.LayerNorm(A_ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__UpperCamelCase = nn.Linear(A_ , A_ )
if encoder_hid_proj_type is None:
__UpperCamelCase = None
elif encoder_hid_proj_type == "linear":
__UpperCamelCase = nn.Linear(A_ , A_ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , A_ ) )
if added_emb_type == "prd":
__UpperCamelCase = nn.Parameter(torch.zeros(1 , 1 , A_ ) )
elif added_emb_type is None:
__UpperCamelCase = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__UpperCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
A_ , A_ , A_ , dropout=A_ , activation_fn="gelu" , attention_bias=A_ , )
for d in range(A_ )
] )
if norm_in_type == "layer":
__UpperCamelCase = nn.LayerNorm(A_ )
elif norm_in_type is None:
__UpperCamelCase = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__UpperCamelCase = nn.LayerNorm(A_ )
__UpperCamelCase = nn.Linear(A_ , A_ )
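# Additive causal mask: entries above the diagonal are set to -10000 so tokens cannot attend to future positions.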
__UpperCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
__UpperCamelCase = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , A_ , persistent=A_ )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , A_ ) )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , A_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A ( self : Tuple )-> Dict[str, AttentionProcessor]:
__UpperCamelCase = {}
def fn_recursive_add_processors(A_ : str , A_ : torch.nn.Module , A_ : Dict[str, AttentionProcessor] ):
if hasattr(A_ , "set_processor" ):
__UpperCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , A_ , A_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A_ , A_ , A_ )
return processors
def A ( self : Tuple , A_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] )-> Optional[int]:
__UpperCamelCase = len(self.attn_processors.keys() )
if isinstance(A_ , A_ ) and len(A_ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(A_ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(A_ : str , A_ : torch.nn.Module , A_ : Any ):
if hasattr(A_ , "set_processor" ):
if not isinstance(A_ , A_ ):
module.set_processor(A_ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , A_ , A_ )
for name, module in self.named_children():
fn_recursive_attn_processor(A_ , A_ , A_ )
def A ( self : List[str] )-> List[str]:
self.set_attn_processor(AttnProcessor() )
def A ( self : Dict , A_ : str , A_ : Union[torch.Tensor, float, int] , A_ : torch.FloatTensor , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.BoolTensor] = None , A_ : bool = True , )-> Any:
__UpperCamelCase = hidden_states.shape[0]
__UpperCamelCase = timestep
if not torch.is_tensor(A_ ):
__UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
__UpperCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCamelCase = timesteps * torch.ones(A_ , dtype=timesteps.dtype , device=timesteps.device )
__UpperCamelCase = self.time_proj(A_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__UpperCamelCase = timesteps_projected.to(dtype=self.dtype )
__UpperCamelCase = self.time_embedding(A_ )
if self.embedding_proj_norm is not None:
__UpperCamelCase = self.embedding_proj_norm(A_ )
__UpperCamelCase = self.embedding_proj(A_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__UpperCamelCase = self.encoder_hidden_states_proj(A_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
__UpperCamelCase = self.proj_in(A_ )
__UpperCamelCase = self.positional_embedding.to(hidden_states.dtype )
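# Assemble the input sequence: [optional text embeddings, projected image embedding, timestep embedding, noisy latents, optional learned PRD token].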
__UpperCamelCase = []
__UpperCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(A_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__UpperCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__UpperCamelCase = hidden_states[:, None, :]
__UpperCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__UpperCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(A_ , -1 , -1 )
additional_embeds.append(A_ )
__UpperCamelCase = torch.cat(
A_ , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__UpperCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__UpperCamelCase = F.pad(
A_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__UpperCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
__UpperCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
__UpperCamelCase = F.pad(A_ , (0, self.additional_embeddings) , value=0.0 )
__UpperCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__UpperCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__UpperCamelCase = self.norm_in(A_ )
for block in self.transformer_blocks:
__UpperCamelCase = block(A_ , attention_mask=A_ )
__UpperCamelCase = self.norm_out(A_ )
if self.prd_embedding is not None:
__UpperCamelCase = hidden_states[:, -1]
else:
__UpperCamelCase = hidden_states[:, additional_embeddings_len:]
__UpperCamelCase = self.proj_to_clip_embeddings(A_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=A_ )
def A ( self : Dict , A_ : Tuple )-> Dict:
__UpperCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 228
| 1
|
"""simple docstring"""
from typing import Any
class a :
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Any ) -> Dict:
lowerCamelCase_ = data
lowerCamelCase_ = None
def __repr__( self : int ) -> Tuple:
return F'''Node({self.data})'''
class a :
def __init__( self : List[str] ) -> Dict:
lowerCamelCase_ = None
def __iter__( self : Optional[Any] ) -> int:
lowerCamelCase_ = self.head
while node:
yield node.data
lowerCamelCase_ = node.next
def __len__( self : Union[str, Any] ) -> Optional[Any]:
return sum(1 for _ in self )
def __repr__( self : str ) -> Optional[int]:
return "->".join([str(__UpperCAmelCase ) for item in self] )
def __getitem__( self : str , __SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
lowerCamelCase_ = self.head
for _ in range(__UpperCAmelCase ):
lowerCamelCase_ = current.next
lowerCamelCase_ = data
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
self.insert_nth(len(self ) , __UpperCAmelCase )
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]:
self.insert_nth(0 , __UpperCAmelCase )
def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
lowerCamelCase_ = Node(__UpperCAmelCase )
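# Three cases: empty list, insertion at the head, and insertion after an existing node.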
if self.head is None:
lowerCamelCase_ = new_node
elif index == 0:
lowerCamelCase_ = self.head # link new_node to head
lowerCamelCase_ = new_node
else:
lowerCamelCase_ = self.head
for _ in range(index - 1 ):
lowerCamelCase_ = temp.next
lowerCamelCase_ = temp.next
lowerCamelCase_ = new_node
def UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: # print every node data
print(self )
def UpperCamelCase ( self : Optional[int] ) -> str:
return self.delete_nth(0 )
def UpperCamelCase ( self : str ) -> int: # delete from tail
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict = 0 ) -> List[str]:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
lowerCamelCase_ = self.head # default first node
if index == 0:
lowerCamelCase_ = self.head.next
else:
lowerCamelCase_ = self.head
for _ in range(index - 1 ):
lowerCamelCase_ = temp.next
lowerCamelCase_ = temp.next
lowerCamelCase_ = temp.next.next
return delete_node.data
def UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
return self.head is None
def UpperCamelCase ( self : str ) -> Tuple:
lowerCamelCase_ = None
lowerCamelCase_ = self.head
while current:
# Store the current node's next node.
lowerCamelCase_ = current.next
# Make the current node's next point backwards
lowerCamelCase_ = prev
# Make the previous node be the current node
lowerCamelCase_ = current
# Make the current node the next node (to progress iteration)
lowerCamelCase_ = next_node
# Return prev in order to put the head at the end
lowerCamelCase_ = prev
def lowerCamelCase__ ( ) -> None:
lowerCamelCase_ = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCAmelCase_ ) == i
linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCAmelCase_ ) == 9
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCamelCase_ = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(-8 , 1 ) )
def lowerCamelCase__ ( ) -> None:
lowerCamelCase_ = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
lowerCamelCase_ = LinkedList()
for i in test_input:
linked_list.insert_tail(lowerCAmelCase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowerCAmelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCamelCase_ = linked_list.delete_head()
assert result == -9
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCamelCase_ = linked_list.delete_tail()
assert result == 12.2
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCamelCase_ = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowerCAmelCase_ )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowerCAmelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowerCamelCase__ ( ) -> List[Any]:
from doctest import testmod
testmod()
lowerCamelCase_ = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(lowerCAmelCase_ )
print('\nReading/changing Node data using indexing:' )
print(F'''Element at Position 1: {linked_list[1]}''' )
lowerCamelCase_ = input('Enter New Value: ' ).strip()
print('New list:' )
print(lowerCAmelCase_ )
print(F'''length of linked_list is : {len(lowerCAmelCase_ )}''' )
if __name__ == "__main__":
main()
| 549
|
# Function to print upper half of diamond (pyramid)
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
for i in range(0 ,lowerCAmelCase_ ):
for _ in range(0 ,lowerCAmelCase_ - i - 1 ): # printing spaces
print(' ' ,end='' )
for _ in range(0 ,i + 1 ): # printing stars
print('* ' ,end='' )
print()
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : List[str] ) -> str:
"""simple docstring"""
for i in range(lowerCAmelCase_ ,0 ,-1 ):
for _ in range(i ,0 ,-1 ): # printing stars
print('* ' ,end='' )
print()
for _ in range(lowerCAmelCase_ - i + 1 ,0 ,-1 ): # printing spaces
print(' ' ,end='' )
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
if lowerCAmelCase_ <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCAmelCase_ ) # upper half
reverse_floyd(lowerCAmelCase_ ) # lower half
if __name__ == "__main__":
print(r'| /\ | |- | |- |--| |\ /| |-')
print(r'|/ \| |- |_ |_ |__| | \/ | |_')
__SCREAMING_SNAKE_CASE = 1
while K:
__SCREAMING_SNAKE_CASE = int(input('enter the number and see the magic : '))
print()
pretty_print(user_number)
__SCREAMING_SNAKE_CASE = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
| 220
| 0
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
def _A () -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=__a , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=__a , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=__a , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=__a , default='''data/dump''' , help='''The dump file prefix.''' )
SCREAMING_SNAKE_CASE_ : int = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
SCREAMING_SNAKE_CASE_ : Optional[int] = BertTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
SCREAMING_SNAKE_CASE_ : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
SCREAMING_SNAKE_CASE_ : Dict = GPTaTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
SCREAMING_SNAKE_CASE_ : List[str] = fp.readlines()
logger.info('''Start encoding''' )
logger.info(f'{len(__a )} examples to process.' )
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 1_00_00
SCREAMING_SNAKE_CASE_ : str = time.time()
for text in data:
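# Wrap every line with the tokenizer-specific start/end special tokens before encoding.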
SCREAMING_SNAKE_CASE_ : Any = f'{bos} {text.strip()} {sep}'
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.encode(__a , add_special_tokens=__a )
rslt.append(__a )
iter += 1
if iter % interval == 0:
SCREAMING_SNAKE_CASE_ : List[Any] = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
SCREAMING_SNAKE_CASE_ : Dict = time.time()
logger.info('''Finished binarization''' )
logger.info(f'{len(__a )} examples processed.' )
SCREAMING_SNAKE_CASE_ : Dict = f'{args.dump_file}.{args.tokenizer_name}.pickle'
SCREAMING_SNAKE_CASE_ : Any = tokenizer.vocab_size
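# Store ids as 16-bit integers when the vocabulary fits in two bytes, halving the pickle size.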
if vocab_size < (1 << 16):
SCREAMING_SNAKE_CASE_ : int = [np.uintaa(__a ) for d in rslt]
else:
SCREAMING_SNAKE_CASE_ : List[str] = [np.intaa(__a ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(__a , '''wb''' ) as handle:
pickle.dump(rslt_ , __a , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 176
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _A (__a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(example['''content'''] , truncation=__a )['''input_ids''']
SCREAMING_SNAKE_CASE_ : List[Any] = len(example['''content'''] ) / len(output['''input_ids'''] )
return output
UpperCAmelCase_ : Tuple = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase_ : Tuple = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : Tuple = multiprocessing.cpu_count()
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : List[Any] = load_dataset(args.dataset_name, split="""train""")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Dict = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase_ : str = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 176
| 1
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
snake_case : Dict = HfApi()
snake_case : int = {}
# fmt: off
snake_case : Any = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
snake_case : str = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
snake_case : Optional[int] = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
snake_case : int = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
snake_case : List[str] = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
snake_case : List[Any] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
snake_case : int = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
snake_case : Tuple = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
snake_case : List[Any] = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
snake_case : int = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
snake_case : str = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
snake_case : Any = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
snake_case : Dict = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
snake_case : Optional[int] = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
snake_case : Optional[Any] = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
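# Run every matching diffusers checkpoint on fixed noise and compare the first 30 output logits against the recorded slices above.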
snake_case : Dict = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
snake_case : Optional[int] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(F"Started running {mod.modelId}!!!")
if mod.modelId.startswith("CompVis"):
snake_case : Union[str, Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
snake_case : List[Any] = UNetaDModel.from_pretrained(local_checkpoint)
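# Seed both torch and Python RNGs so the sampled noise is identical for every model.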
torch.manual_seed(0)
random.seed(0)
snake_case : Optional[int] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
snake_case : Tuple = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
snake_case : List[str] = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(F"{mod.modelId} has passed successfully!!!")
| 124
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( _snake_case : int , _snake_case : Union[str, Any] ) -> str:
'''simple docstring'''
__magic_name__ : Optional[int] = torch.load(_snake_case , map_location="cpu" )
__magic_name__ : List[Any] = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
__magic_name__ : Any = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__magic_name__ : Optional[Any] = v
else:
__magic_name__ : Tuple = v
__magic_name__ : int = chkpt["params"]
__magic_name__ : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(_snake_case , (torch.FloatTensor, numpy.ndarray) )}
__magic_name__ : Optional[int] = chkpt["dico_word2id"]
__magic_name__ : List[str] = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
__magic_name__ : Union[str, Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
__magic_name__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
__magic_name__ : int = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(_snake_case , _snake_case )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_snake_case , indent=2 ) + "\n" )
print(F'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(_snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_snake_case , indent=2 ) + "\n" )
if __name__ == "__main__":
snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
snake_case : Optional[int] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 124
| 1
|
"""simple docstring"""
__lowerCAmelCase : Optional[int] =8.314_4598
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float:
'''simple docstring'''
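# v_rms = sqrt(3 * R * T / M), with R in J/(mol*K), T in kelvin and M in kg/mol.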
if temperature < 0:
raise Exception("""Temperature cannot be less than 0 K""" )
if molar_mass <= 0:
raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
__lowerCAmelCase : List[str] =3_0_0
__lowerCAmelCase : Union[str, Any] =2_8
__lowerCAmelCase : List[str] =rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 197
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _A ( lowerCAmelCase ):
snake_case__ : int = 'blenderbot-small'
snake_case__ : Optional[Any] = ['past_key_values']
snake_case__ : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __lowerCAmelCase=5_0265 , __lowerCAmelCase=512 , __lowerCAmelCase=8 , __lowerCAmelCase=2048 , __lowerCAmelCase=16 , __lowerCAmelCase=8 , __lowerCAmelCase=2048 , __lowerCAmelCase=16 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase="gelu" , __lowerCAmelCase=512 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1 , __lowerCAmelCase=False , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=2 , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = use_cache
lowercase = encoder_layers
lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , forced_eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
class _A ( lowerCAmelCase ):
@property
def A__ ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowercase = {0: """batch"""}
lowercase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowercase = {0: """batch""", 1: """decoder_sequence"""}
lowercase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__lowerCAmelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowercase , lowercase = self.num_layers
for i in range(__lowerCAmelCase ):
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowercase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def A__ ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase = super().outputs
else:
lowercase = super(__lowerCAmelCase , self ).outputs
if self.use_past:
lowercase , lowercase = self.num_layers
for i in range(__lowerCAmelCase ):
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Generate decoder inputs
lowercase = seq_length if not self.use_past else 1
lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
lowercase = dict(**__lowerCAmelCase , **__lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase , lowercase = common_inputs["""input_ids"""].shape
lowercase = common_inputs["""decoder_input_ids"""].shape[1]
lowercase , lowercase = self.num_attention_heads
lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase = decoder_seq_length + 3
lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__lowerCAmelCase , __lowerCAmelCase )] , dim=1 )
lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase , lowercase = self.num_layers
lowercase = min(__lowerCAmelCase , __lowerCAmelCase )
lowercase = max(__lowerCAmelCase , __lowerCAmelCase ) - min_num_layers
lowercase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
) )
# TODO: test this.
lowercase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__lowerCAmelCase , __lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) )
return common_inputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase , lowercase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowercase = seqlen + 2
lowercase , lowercase = self.num_layers
lowercase , lowercase = self.num_attention_heads
lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase = common_inputs["""attention_mask"""].dtype
lowercase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__lowerCAmelCase , __lowerCAmelCase , dtype=__lowerCAmelCase )] , dim=1 )
lowercase = [
(torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(__lowerCAmelCase )
]
return common_inputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
lowercase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase = tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
lowercase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowercase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase = dict(tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase ) )
return common_inputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
elif self.task == "causal-lm":
lowercase = self._generate_dummy_inputs_for_causal_lm(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
else:
lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
return common_inputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase = super()._flatten_past_key_values_(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
lowercase = super(__lowerCAmelCase , self )._flatten_past_key_values_(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
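# Editor's sketch (hypothetical helper, not part of transformers): every
# past_key_values entry built by the dummy-input generators above has the shape
#   (batch, num_heads, past_seq_len, hidden_size // num_heads)
# for the relevant side of the model. A standalone illustration:
def past_key_value_shape(batch: int, num_heads: int, past_seq_len: int, hidden_size: int) -> tuple:
    """Per-layer key/value cache shape used by the dummy-input generators."""
    return (batch, num_heads, past_seq_len, hidden_size // num_heads)

# e.g. past_key_value_shape(2, 16, 10, 512) == (2, 16, 10, 32)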
| 197
| 1
|
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowercase_ = logging.getLogger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
def snake_case_( self , A , A , A=None , A=None ) -> int:
_SCREAMING_SNAKE_CASE = self.layer[current_layer](A , A , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , snake_case_ , )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , A ) -> Optional[int]:
super().__init__(A )
_SCREAMING_SNAKE_CASE = BertEncoderWithPabee(A )
self.init_weights()
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
def snake_case_( self , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = threshold
def snake_case_( self , A ) -> Dict:
_SCREAMING_SNAKE_CASE = patience
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE = (
f'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
f' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
)
print(A )
@add_start_docstrings_to_model_forward(A )
def snake_case_( self , A=None , A=None , A=None , A=None , A=None , A=None , A=None , A=None , A=None , A=None , A=False , ) -> int:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE = input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_SCREAMING_SNAKE_CASE = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE = torch.ones(A , device=A )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE = torch.zeros(A , dtype=torch.long , device=A )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE = self.get_extended_attention_mask(A , A , A )
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE = torch.ones(A , device=A )
_SCREAMING_SNAKE_CASE = self.invert_attention_mask(A )
else:
_SCREAMING_SNAKE_CASE = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE = self.get_head_mask(A , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE = self.embeddings(
input_ids=A , position_ids=A , token_type_ids=A , inputs_embeds=A )
_SCREAMING_SNAKE_CASE = embedding_output
if self.training:
_SCREAMING_SNAKE_CASE = []
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE = self.encoder.adaptive_forward(
A , current_layer=A , attention_mask=A , head_mask=A )
_SCREAMING_SNAKE_CASE = self.pooler(A )
_SCREAMING_SNAKE_CASE = output_layers[i](output_dropout(A ) )
res.append(A )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE = self.encoder(
A , attention_mask=A , head_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
_SCREAMING_SNAKE_CASE = self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE = [output_layers[self.config.num_hidden_layers - 1](A )]
else:
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE = self.encoder.adaptive_forward(
A , current_layer=A , attention_mask=A , head_mask=A )
_SCREAMING_SNAKE_CASE = self.pooler(A )
_SCREAMING_SNAKE_CASE = output_layers[i](A )
if regression:
_SCREAMING_SNAKE_CASE = logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE = 0
else:
_SCREAMING_SNAKE_CASE = logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(A ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , snake_case_ , )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , A ) -> Union[str, Any]:
super().__init__(A )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = BertModelWithPabee(A )
_SCREAMING_SNAKE_CASE = nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(A )
def snake_case_( self , A=None , A=None , A=None , A=None , A=None , A=None , A=None , ) -> Dict:
_SCREAMING_SNAKE_CASE = self.bert(
input_ids=A , attention_mask=A , token_type_ids=A , position_ids=A , head_mask=A , inputs_embeds=A , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE = (logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = 0
for ix, logits_item in enumerate(A ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE = MSELoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE = (total_loss / total_weights,) + outputs
return outputs
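# Editor's sketch of the PABEE inference loop implemented above: stop as soon
# as `patience` consecutive internal classifiers agree on the prediction.
# Hypothetical standalone version; `layer_logits` is assumed to be an iterable
# of per-layer torch logit tensors of shape (batch, num_labels):
def pabee_early_exit(layer_logits, patience):
    patient_result = None
    patient_counter = 0
    used_layers = 0
    for logits in layer_logits:
        used_layers += 1
        prediction = logits.argmax(dim=1)
        if patient_result is not None and bool(patient_result.eq(prediction).all()):
            patient_counter += 1
        else:
            patient_counter = 0
        patient_result = prediction
        if patient_counter == patience:
            break
    return patient_result, used_layers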
| 314
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class a_ ( snake_case_ ):
'''simple docstring'''
pass
class a_ :
'''simple docstring'''
def __init__( self , A ) -> None:
_SCREAMING_SNAKE_CASE = data
_SCREAMING_SNAKE_CASE = None
def __iter__( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self
_SCREAMING_SNAKE_CASE = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(A )
yield node.data
_SCREAMING_SNAKE_CASE = node.next_node
@property
def snake_case_( self ) -> bool:
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
lowercase_ = Node(1)
lowercase_ = Node(2)
lowercase_ = Node(3)
lowercase_ = Node(4)
print(root_node.has_loop) # False
lowercase_ = root_node.next_node
print(root_node.has_loop) # True
lowercase_ = Node(5)
lowercase_ = Node(6)
lowercase_ = Node(5)
lowercase_ = Node(6)
print(root_node.has_loop) # False
lowercase_ = Node(1)
print(root_node.has_loop) # False
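# Editor's note: `has_loop` above collects visited nodes in a Python list, so
# each `node in visited` check is linear and the whole scan is O(n^2). A
# constant-space alternative sketch using Floyd's tortoise-and-hare algorithm
# (works with the `Node` class defined above):
def has_loop_floyd(head) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False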
| 314
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__snake_case :List[Any] =random.Random()
def lowerCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : int=1.0 , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Dict=None ) -> List[str]:
'''simple docstring'''
if rng is None:
A = global_rng
A = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any=7 , __UpperCamelCase : Tuple=400 , __UpperCamelCase : Tuple=2_000 , __UpperCamelCase : Tuple=2_048 , __UpperCamelCase : Any=128 , __UpperCamelCase : Any=1 , __UpperCamelCase : Tuple=512 , __UpperCamelCase : Any=30 , __UpperCamelCase : Tuple=44_100 , ) -> Any:
A = parent
A = batch_size
A = min_seq_length
A = max_seq_length
A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A = spectrogram_length
A = feature_size
A = num_audio_channels
A = hop_length
A = chunk_length
A = sampling_rate
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __UpperCamelCase ( self : Dict , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : Dict=False ) -> Dict:
def _flatten(__UpperCamelCase : int ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
A = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCAmelCase__ ( _lowerCamelCase , unittest.TestCase ):
A_ : Union[str, Any] = TvltFeatureExtractor
def __UpperCamelCase ( self : Any ) -> List[str]:
A = TvltFeatureExtractionTester(self )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
A = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__UpperCamelCase , 'spectrogram_length' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'feature_size' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'num_audio_channels' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'hop_length' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'chunk_length' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'sampling_rate' ) )
def __UpperCamelCase ( self : Tuple ) -> Any:
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
A = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
A = feat_extract_first.to_dict()
A = feat_extract_second.to_dict()
A = dict_first.pop('mel_filters' )
A = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : int ) -> Any:
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(__UpperCamelCase , 'feat_extract.json' )
feat_extract_first.to_json_file(__UpperCamelCase )
A = self.feature_extraction_class.from_json_file(__UpperCamelCase )
A = feat_extract_first.to_dict()
A = feat_extract_second.to_dict()
A = dict_first.pop('mel_filters' )
A = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
# Initialize feature_extractor
A = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
A = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
A = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
A = feature_extractor(__UpperCamelCase , return_tensors='np' , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
A = feature_extractor(
__UpperCamelCase , return_tensors='np' , sampling_rate=44_100 , mask_audio=__UpperCamelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
A = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A = np.asarray(__UpperCamelCase )
A = feature_extractor(__UpperCamelCase , return_tensors='np' , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Union[str, Any] ) -> int:
A = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
A = ds.sort('id' ).select(range(__UpperCamelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __UpperCamelCase ( self : Tuple ) -> Dict:
A = self._load_datasamples(1 )
A = TvltFeatureExtractor()
A = feature_extractor(__UpperCamelCase , return_tensors='pt' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
A = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __UpperCamelCase , atol=1e-4 ) )
| 717
|
from __future__ import annotations
def lowerCamelCase_ ( lowerCAmelCase__ : list[float] ) -> bool:
'''simple docstring'''
if len(lowerCAmelCase__ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
A = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
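# Editor's note: the check above implements the polygon inequality -- a list
# of side lengths can form a polygon iff the largest side is strictly smaller
# than the sum of the remaining sides. For example, [6, 10, 5] passes
# (10 < 6 + 5) while [3, 7, 13, 2] fails (13 >= 3 + 7 + 2).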
| 224
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
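# Editor's sketch of the lazy-import pattern used above (a simplified,
# hypothetical stand-in for transformers' _LazyModule, not its actual code):
# only the import structure is registered at import time, and each submodule
# is imported on first attribute access, then cached.
import importlib
import types

class MinimalLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_submodule[attr]  # real impl maps KeyError to AttributeError
        value = getattr(importlib.import_module(f".{submodule}", self.__name__), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value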
| 267
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case (UpperCamelCase ):
def __init__( self ,UpperCAmelCase_ = "▁" ,UpperCAmelCase_ = True ,UpperCAmelCase_ = "<unk>" ,UpperCAmelCase_ = "</s>" ,UpperCAmelCase_ = "<pad>" ,) -> Optional[int]:
lowercase__ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
lowercase__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowercase__ = token_dict["token"]
lowercase__ = Tokenizer(Unigram() )
lowercase__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) ," " ),
normalizers.Lowercase(),
] )
lowercase__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=UpperCAmelCase_ ,add_prefix_space=UpperCAmelCase_ ),
pre_tokenizers.Digits(individual_digits=UpperCAmelCase_ ),
pre_tokenizers.Punctuation(),
] )
lowercase__ = decoders.Metaspace(replacement=UpperCAmelCase_ ,add_prefix_space=UpperCAmelCase_ )
lowercase__ = TemplateProcessing(
single=F'''$A {self.special_tokens['eos']['token']}''' ,special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] ,)
lowercase__ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = 8_000 ,UpperCAmelCase_ = True ,) -> List[str]:
lowercase__ = trainers.UnigramTrainer(
vocab_size=UpperCAmelCase_ ,special_tokens=self.special_tokens_list ,show_progress=UpperCAmelCase_ ,)
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
lowercase__ = [files]
self._tokenizer.train(UpperCAmelCase_ ,trainer=UpperCAmelCase_ )
self.add_unk_id()
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = 8_000 ,UpperCAmelCase_ = True ,) -> Union[str, Any]:
lowercase__ = trainers.UnigramTrainer(
vocab_size=UpperCAmelCase_ ,special_tokens=self.special_tokens_list ,show_progress=UpperCAmelCase_ ,)
self._tokenizer.train_from_iterator(UpperCAmelCase_ ,trainer=UpperCAmelCase_ )
self.add_unk_id()
def _a ( self ) -> str:
lowercase__ = json.loads(self._tokenizer.to_str() )
lowercase__ = self.special_tokens["unk"]["id"]
lowercase__ = Tokenizer.from_str(json.dumps(UpperCAmelCase_ ) )
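# Editor's note on the pipeline built above: normalization runs NMT cleanup,
# NFKC, whitespace collapsing and lowercasing; pre-tokenization splits on
# Metaspace, individual digits and punctuation; the model is Unigram trained
# with trainers.UnigramTrainer; and the post-processor appends the </s> token
# to every sequence. This mirrors the SentencePiece Unigram recipe shipped
# with the `tokenizers` library.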
| 267
| 1
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__ = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowerCAmelCase__ = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__lowercase = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
__lowercase = True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , _UpperCAmelCase , )
is not None
):
__lowercase = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
__lowercase = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
__lowercase = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
__lowercase = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
__lowercase = True
if not attribute_used:
__lowercase = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
__lowercase = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
__lowercase = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
__lowercase = True
elif attribute.endswith("_token_id" ):
__lowercase = True
# configuration class specific cases
if not case_allowed:
__lowercase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
__lowercase = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
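# Editor's illustration (added): the multi-line regex branch above exists so
# that wrapped getattr calls still count as usages, e.g.
#
#   value = getattr(
#       self.config, "hidden_size"
#   )
#
# matches because [ \t\v\n\r\f]* allows any whitespace, newlines included,
# between "getattr", "(", the config reference and the attribute string.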
def __lowercase ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
__lowercase = dict(inspect.signature(config_class.__init__ ).parameters )
__lowercase = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
__lowercase = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
__lowercase = {}
if len(config_class.attribute_map ) > 0:
__lowercase = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
__lowercase = inspect.getsourcefile(_UpperCAmelCase )
__lowercase = os.path.dirname(_UpperCAmelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
__lowercase = [os.path.join(_UpperCAmelCase , _UpperCAmelCase ) for fn in os.listdir(_UpperCAmelCase ) if fn.startswith("modeling_" )]
# Get the source code strings
__lowercase = []
for path in modeling_paths:
if os.path.isfile(_UpperCAmelCase ):
with open(_UpperCAmelCase ) as fp:
modeling_sources.append(fp.read() )
__lowercase = []
for config_param, default_value in zip(_UpperCAmelCase , _UpperCAmelCase ):
# `attributes` here is all the variant names for `config_param`
__lowercase = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
unused_attributes.append(attributes[0] )
return sorted(_UpperCAmelCase )
def __lowercase ( ) -> Optional[int]:
'''simple docstring'''
__lowercase = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
__lowercase = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _UpperCAmelCase : inspect.isclass(_UpperCAmelCase )
and issubclass(_UpperCAmelCase , _UpperCAmelCase )
and inspect.getmodule(_UpperCAmelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
__lowercase = check_config_attributes_being_used(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
__lowercase = unused_attributes
if len(_UpperCAmelCase ) > 0:
__lowercase = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(_UpperCAmelCase )
if __name__ == "__main__":
check_config_attributes()
| 576
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowerCAmelCase__ = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
lowerCAmelCase__ = json.load(f)
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self , lowerCAmelCase_ ):
return FSMTTokenizer.from_pretrained(lowerCAmelCase_ )
def snake_case__ ( self , lowerCAmelCase_ ):
__lowercase = FSMTForConditionalGeneration.from_pretrained(lowerCAmelCase_ ).to(lowerCAmelCase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
__lowercase = f'''facebook/wmt19-{pair}'''
__lowercase = self.get_tokenizer(lowerCAmelCase_ )
__lowercase = self.get_model(lowerCAmelCase_ )
__lowercase = bleu_data[pair]["src"]
__lowercase = bleu_data[pair]["tgt"]
__lowercase = tokenizer(lowerCAmelCase_ , return_tensors="pt" , truncation=lowerCAmelCase_ , padding="longest" ).to(lowerCAmelCase_ )
__lowercase = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
__lowercase = tokenizer.batch_decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
__lowercase = calculate_bleu(lowerCAmelCase_ , lowerCAmelCase_ )
print(lowerCAmelCase_ )
self.assertGreaterEqual(scores["bleu"] , lowerCAmelCase_ )
| 576
| 1
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__A : Union[str, Any] = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
__A : str = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
__A : Dict = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def lowercase__ ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : List[Any] = 0.0
for i, j in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
n_correct += 1.0 if math_equivalence.is_equiv(UpperCAmelCase_ , UpperCAmelCase_ ) else 0.0
lowerCAmelCase : str = n_correct / len(UpperCAmelCase_ )
return {
"accuracy": accuracy,
}
| 343
|
from ...processing_utils import ProcessorMixin
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : str = "SpeechT5FeatureExtractor"
lowerCAmelCase_ : Any = "SpeechT5Tokenizer"
def __init__( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : List[str] = kwargs.pop('audio' , UpperCAmelCase_ )
lowerCAmelCase : str = kwargs.pop('text' , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = kwargs.pop('text_target' , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = kwargs.pop('audio_target' , UpperCAmelCase_ )
lowerCAmelCase : int = kwargs.pop('sampling_rate' , UpperCAmelCase_ )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
lowerCAmelCase : Dict = self.feature_extractor(UpperCAmelCase_ , *UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , **UpperCAmelCase_ )
elif text is not None:
lowerCAmelCase : List[Any] = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ )
else:
lowerCAmelCase : Any = None
if audio_target is not None:
lowerCAmelCase : Tuple = self.feature_extractor(audio_target=UpperCAmelCase_ , *UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : int = targets['input_values']
elif text_target is not None:
lowerCAmelCase : Optional[int] = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = targets['input_ids']
else:
lowerCAmelCase : Union[str, Any] = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase : Dict = labels
lowerCAmelCase : Any = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase : Tuple = decoder_attention_mask
return inputs
def lowercase__ ( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = kwargs.pop('input_values' , UpperCAmelCase_ )
lowerCAmelCase : List[str] = kwargs.pop('input_ids' , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = kwargs.pop('labels' , UpperCAmelCase_ )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
lowerCAmelCase : List[str] = self.feature_extractor.pad(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
elif input_ids is not None:
lowerCAmelCase : Dict = self.tokenizer.pad(UpperCAmelCase_ , **UpperCAmelCase_ )
else:
lowerCAmelCase : str = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and "input_ids" in labels[0]):
lowerCAmelCase : int = self.tokenizer.pad(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Dict = targets['input_ids']
else:
lowerCAmelCase : Any = self.feature_extractor.feature_size
lowerCAmelCase : str = self.feature_extractor.num_mel_bins
lowerCAmelCase : Optional[int] = self.feature_extractor.pad(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : List[Any] = feature_size_hack
lowerCAmelCase : Tuple = targets['input_values']
else:
lowerCAmelCase : Tuple = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase : Union[str, Any] = labels
lowerCAmelCase : List[str] = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase : Optional[Any] = decoder_attention_mask
return inputs
def lowercase__ ( self : int , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowercase__ ( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
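# Editor's usage sketch (the checkpoint name and 16 kHz rate are assumptions
# for a TTS setup): text goes through the tokenizer, target audio through the
# feature extractor, and the spectrogram targets come back as `labels`:
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   batch = processor(text="hello world", audio_target=waveform,
#                     sampling_rate=16_000, return_tensors="pt")
#   # -> batch["input_ids"], batch["labels"], and, if the feature extractor
#   #    returned one, batch["decoder_attention_mask"]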
| 343
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def SCREAMING_SNAKE_CASE_ ( snake_case_ : List[Any] ) -> Optional[Any]:
if "cls_token" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace('cls_token' , 'vit.embeddings.cls_token' )
if "mask_token" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('mask_token' , 'decoder.mask_token' )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE : Any = name.replace('pos_embed' , 'vit.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('patch_embed.proj' , 'vit.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace('patch_embed.norm' , 'vit.embeddings.norm' )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('decoder_blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace('blocks' , 'vit.encoder.layer' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
SCREAMING_SNAKE_CASE : int = name.replace('attn' , 'attention.self' )
if "norm1" in name:
SCREAMING_SNAKE_CASE : int = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE : int = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('norm.weight' , 'vit.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE : int = name.replace('norm.bias' , 'vit.layernorm.bias' )
return name
def SCREAMING_SNAKE_CASE_ ( snake_case_ : Tuple , snake_case_ : List[str] ) -> List[Any]:
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE : str = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
SCREAMING_SNAKE_CASE : Tuple = key.split('.' )
SCREAMING_SNAKE_CASE : Dict = int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE : int = config.decoder_hidden_size
SCREAMING_SNAKE_CASE : Any = 'decoder.decoder_layers.'
if "weight" in key:
SCREAMING_SNAKE_CASE : Dict = val[:dim, :]
SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : Any = val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE : int = val[:dim]
SCREAMING_SNAKE_CASE : int = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : Dict = val[-dim:]
else:
SCREAMING_SNAKE_CASE : str = config.hidden_size
SCREAMING_SNAKE_CASE : Dict = 'vit.encoder.layer.'
if "weight" in key:
SCREAMING_SNAKE_CASE : str = val[:dim, :]
SCREAMING_SNAKE_CASE : Dict = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE : Any = val[:dim]
SCREAMING_SNAKE_CASE : Dict = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:]
else:
SCREAMING_SNAKE_CASE : int = val
return orig_state_dict
def SCREAMING_SNAKE_CASE_ ( snake_case_ : Tuple , snake_case_ : int ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = 1024
SCREAMING_SNAKE_CASE : Optional[int] = 4096
SCREAMING_SNAKE_CASE : Any = 24
SCREAMING_SNAKE_CASE : Tuple = 16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE : Dict = 14
SCREAMING_SNAKE_CASE : Optional[int] = 1280
SCREAMING_SNAKE_CASE : Union[str, Any] = 5120
SCREAMING_SNAKE_CASE : Union[str, Any] = 32
SCREAMING_SNAKE_CASE : Tuple = 16
SCREAMING_SNAKE_CASE : Dict = ViTMAEForPreTraining(snake_case_ )
SCREAMING_SNAKE_CASE : Tuple = torch.hub.load_state_dict_from_url(snake_case_ , map_location='cpu' )['model']
SCREAMING_SNAKE_CASE : Optional[Any] = ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
SCREAMING_SNAKE_CASE : Optional[int] = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
SCREAMING_SNAKE_CASE : List[str] = ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=snake_case_ , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : int = model(**snake_case_ )
SCREAMING_SNAKE_CASE : Tuple = outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , snake_case_ , atol=1e-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
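# Editor's usage note (script file name and output directory are placeholders;
# the checkpoint URL is the default declared above):
#
#   python convert_vit_mae_checkpoint.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base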
| 220
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__UpperCAmelCase = '\\n\n'
__UpperCAmelCase = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__UpperCAmelCase = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
class EditDistance:
    """Levenshtein edit distance between two strings, implemented both
    top-down (memoized recursion) and bottom-up (tabulation)."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

        return self.dp[m][n]

    def min_dist_top_down(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
class SubArray:
    """Maximum contiguous sub-array sum, computed with a Kadane-style DP."""

    def __init__(self, arr):
        # we need a list, not a string, so split the comma-separated input
        self.array = arr.split(",")

    def solve_sub_array(self):
        # sum_value[i]: best sum of a sub-array ending exactly at index i
        # rear[i]: best sum of any sub-array within the first i + 1 elements
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
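
# Hedged counterexample: an odd cycle is the canonical non-bipartite graph,
# so a triangle should make the checker above return False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False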
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
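

if __name__ == "__main__":
    # Hedged sketch of BLEU's building block: the clipped (modified) n-gram
    # precision, where a candidate n-gram is credited at most as many times as
    # it appears in the reference. Shown for unigrams on made-up tokens; this
    # is illustrative and separate from the metric class above.
    from collections import Counter

    def clipped_unigram_precision(candidate, reference):
        cand_counts = Counter(candidate)
        ref_counts = Counter(reference)
        clipped = sum(min(n, ref_counts[tok]) for tok, n in cand_counts.items())
        return clipped / max(len(candidate), 1)

    # "the the the" vs. "the cat": only one "the" is credited -> 1/3
    assert clipped_unigram_precision(["the"] * 3, ["the", "cat"]) == 1 / 3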
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into one."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several map-style or iterable datasets into one."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
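

# Hedged usage sketch (doctest-style, assuming the public `datasets` package):
#
#   >>> from datasets import Dataset, interleave_datasets, concatenate_datasets
#   >>> d1 = Dataset.from_dict({"text": ["a", "b"]})
#   >>> d2 = Dataset.from_dict({"text": ["c", "d"]})
#   >>> mixed = interleave_datasets([d1, d2], probabilities=[0.7, 0.3], seed=42,
#   ...                             stopping_strategy="all_exhausted")
#   >>> joined = concatenate_datasets([d1, d2])
#   >>> len(joined)
#   4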
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase = 100_0000 ) -> int:
lowerCAmelCase__ : Tuple = limit + 1
lowerCAmelCase__ : int = [0] * limit
for first_term in range(1 , __UpperCAmelCase ):
for n in range(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[str] = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
lowerCAmelCase__ : Tuple = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
_A = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
_A = logging.WARNING
def lowercase_ ( ) -> Optional[int]:
lowerCAmelCase__ : List[str] = os.getenv("""DATASETS_VERBOSITY""" , __UpperCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def lowercase_ ( ) -> str:
return __name__.split(""".""" )[0]
def lowercase_ ( ) -> logging.Logger:
return logging.getLogger(_get_library_name() )
def lowercase_ ( ) -> None:
# Apply our default configuration to the library root logger.
lowerCAmelCase__ : Tuple = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase_ ( ) -> None:
lowerCAmelCase__ : int = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase_ ( __UpperCAmelCase = None ) -> logging.Logger:
if name is None:
lowerCAmelCase__ : Union[str, Any] = _get_library_name()
return logging.getLogger(__UpperCAmelCase )
def lowercase_ ( ) -> int:
return _get_library_root_logger().getEffectiveLevel()
def lowercase_ ( __UpperCAmelCase ) -> None:
_get_library_root_logger().setLevel(__UpperCAmelCase )
def lowercase_ ( ) -> int:
return set_verbosity(__UpperCAmelCase )
def lowercase_ ( ) -> str:
return set_verbosity(__UpperCAmelCase )
def lowercase_ ( ) -> List[str]:
return set_verbosity(__UpperCAmelCase )
def lowercase_ ( ) -> int:
return set_verbosity(__UpperCAmelCase )
def lowercase_ ( ) -> None:
lowerCAmelCase__ : Optional[int] = False
def lowercase_ ( ) -> None:
lowerCAmelCase__ : str = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class _lowerCamelCase :
def __init__( self : Optional[int] , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[str] ) -> Dict: # pylint: disable=unused-argument
"""simple docstring"""
lowerCAmelCase__ : Dict = args[0] if args else None
def __iter__( self : Dict ) -> Dict:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self : str , UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
def empty_fn(*UpperCamelCase : Optional[int] , **UpperCamelCase : int ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Tuple ) -> Tuple:
"""simple docstring"""
return self
def __exit__( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
return
_A = True
class _lowerCamelCase :
def __call__( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , UpperCamelCase : Any=False , **UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*UpperCamelCase , **UpperCamelCase )
else:
return EmptyTqdm(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Tuple , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_A = _tqdm_cls()
def lowercase_ ( ) -> bool:
global _tqdm_active
return bool(_tqdm_active )
def lowercase_ ( ) -> Any:
global _tqdm_active
lowerCAmelCase__ : List[str] = True
def lowercase_ ( ) -> Tuple:
global _tqdm_active
lowerCAmelCase__ : List[str] = False
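

if __name__ == "__main__":
    # Hedged demo of the API above: raise the library verbosity, log through
    # the root library logger, and toggle the shared tqdm wrapper.
    set_verbosity(INFO)
    logger = get_logger()
    logger.info("library logging is configured at INFO level")

    disable_progress_bar()
    assert not is_progress_bar_enabled()
    enable_progress_bar()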
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    """Image processor that optionally resizes, center-crops, rescales and
    normalizes images and returns channel-first pixel values."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
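

if __name__ == "__main__":
    # Hedged usage sketch on a synthetic image; the numbers are arbitrary and
    # the processor defaults are the ones defined in __init__ above.
    dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    processor = ImageProcessor()
    batch = processor(images=dummy, return_tensors=None)
    print(batch["pixel_values"][0].shape)  # (3, 224, 224): channel-first, cropped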
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t))

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, (h))

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
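
    # Hedged sanity check: stooge sort runs in roughly O(n^2.7) time but is
    # still a correct comparison sort, so it must agree with sorted().
    assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]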
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of `AutoencoderKL.encode`, holding the posterior distribution."""

    latent_dist: DiagonalGaussianDistribution


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational autoencoder with a KL-regularized latent space, with
    optional sliced and tiled encoding/decoding for large inputs."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
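

if __name__ == "__main__":
    # Hedged illustration of the crossfade used by blend_h above: over
    # `blend_extent` columns, the weights ramp linearly from tile `a` to
    # tile `b`, which is what hides the seams between decoded tiles. The
    # tensors here are toy values, not real latents.
    a = torch.zeros(1, 1, 2, 4)
    b = torch.ones(1, 1, 2, 4)
    blend_extent = 4
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
    print(b[0, 0, 0])  # tensor([0.0000, 0.2500, 0.5000, 0.7500]): a smooth ramp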
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ="""▁"""
__lowerCAmelCase : str ={"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCAmelCase : Union[str, Any] ={
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
}
}
__lowerCAmelCase : Optional[Any] ={
"""facebook/mbart-large-en-ro""": 1_0_2_4,
"""facebook/mbart-large-cc25""": 1_0_2_4,
}
# fmt: off
__lowerCAmelCase : Tuple =["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _A ( __a ):
snake_case__ : Dict = VOCAB_FILES_NAMES
snake_case__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : int = ["""input_ids""", """attention_mask"""]
snake_case__ : List[int] = []
snake_case__ : List[int] = []
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenizer_file=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
lowercase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase = 1
lowercase = len(self.sp_model )
lowercase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowerCamelCase )
}
lowercase = {v: k for k, v in self.lang_code_to_id.items()}
lowercase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase = src_lang if src_lang is not None else """en_XX"""
lowercase = self.lang_code_to_id[self._src_lang]
lowercase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
lowercase = self.__dict__.copy()
lowercase = None
lowercase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def A__ ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def A__ ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
lowercase = [1] * len(self.prefix_tokens )
lowercase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
lowercase = src_lang
lowercase = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
lowercase = self.convert_tokens_to_ids(_lowerCamelCase )
lowercase = tgt_lang_id
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """""".join(_lowerCamelCase ).replace(_lowerCamelCase , """ """ ).strip()
return out_string
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = "en_XX" , __lowerCAmelCase = None , __lowerCAmelCase = "ro_RO" , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = src_lang
lowercase = tgt_lang
return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
def A__ ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def A__ ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.lang_code_to_id[src_lang]
lowercase = []
lowercase = [self.eos_token_id, self.cur_lang_code]
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.lang_code_to_id[lang]
lowercase = []
lowercase = [self.eos_token_id, self.cur_lang_code]
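

if __name__ == "__main__":
    # Hedged sketch of the fairseq/spm alignment documented above, with no
    # real checkpoint needed: ids 0-3 come from `fairseq_tokens_to_ids`, every
    # SentencePiece id is shifted by `fairseq_offset`, and the language codes
    # sit after the vocabulary. The spm id below is hypothetical.
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    fairseq_offset = 1
    spm_id_of_comma = 3  # hypothetical SentencePiece id for ","
    print(spm_id_of_comma + fairseq_offset)  # 4, matching the fairseq table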
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) ->float:
__lowercase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__magic_name__ )] )
__lowercase = np.array(__magic_name__ )
__lowercase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __magic_name__ ) ) , x.transpose() ) , __magic_name__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ) ->float:
__lowercase = (1, 2, 1)
__lowercase = (1, 1, 0, 7)
__lowercase = SARIMAX(
__magic_name__ , exog=__magic_name__ , order=__magic_name__ , seasonal_order=__magic_name__ )
__lowercase = model.fit(disp=__magic_name__ , maxiter=6_0_0 , method="nm" )
__lowercase = model_fit.predict(1 , len(__magic_name__ ) , exog=[test_match] )
return result[0]
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ) ->float:
__lowercase = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__magic_name__ , __magic_name__ )
__lowercase = regressor.predict(__magic_name__ )
return y_pred[0]
def lowerCAmelCase__ ( __magic_name__ ) ->float:
train_user.sort()
__lowercase = np.percentile(__magic_name__ , 2_5 )
__lowercase = np.percentile(__magic_name__ , 7_5 )
__lowercase = qa - qa
__lowercase = qa - (iqr * 0.1)
return low_lim
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ ) ->bool:
__lowercase = 0
__lowercase = 0
for i in list_vote:
if i > actual_result:
__lowercase = not_safe + 1
else:
if abs(abs(__magic_name__ ) - abs(__magic_name__ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_lowercase = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
_lowercase = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
_lowercase = Normalizer().fit_transform(data_input_df.values)
# split data
_lowercase = normalize_df[:, 2].tolist()
_lowercase = normalize_df[:, 0].tolist()
_lowercase = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_lowercase = normalize_df[:, [1, 2]].tolist()
_lowercase = x[: len(x) - 1]
_lowercase = x[len(x) - 1 :]
# for linear regression & sarimax
_lowercase = total_date[: len(total_date) - 1]
_lowercase = total_user[: len(total_user) - 1]
_lowercase = total_match[: len(total_match) - 1]
_lowercase = total_date[len(total_date) - 1 :]
_lowercase = total_user[len(total_user) - 1 :]
_lowercase = total_match[len(total_match) - 1 :]
# voting system with forecasting
_lowercase = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
_lowercase = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
print('''Today\'s data is {not_str}safe.''')
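
    # Hedged worked example of the voting rule above: with forecasts
    # [1.0, 0.9, 1.02] against an actual value of 1.0, the first two votes are
    # within the 0.1 tolerance (safe) and the third overshoots (not safe),
    # so the 2-to-1 majority reports the data as safe.
    assert data_safety_checker([1.0, 0.9, 1.02], 1.0) is True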
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
UpperCAmelCase__ : str = 1_0
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
for i in range(_snake_case ,_snake_case ):
if array[i] == target:
return i
return -1
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : str = len(_snake_case )
while left <= right:
if right - left < precision:
return lin_search(_snake_case ,_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Any = (left + right) // 3 + 1
SCREAMING_SNAKE_CASE__ : List[Any] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
SCREAMING_SNAKE_CASE__ : int = one_third - 1
elif array[two_third] < target:
SCREAMING_SNAKE_CASE__ : List[Any] = two_third + 1
else:
SCREAMING_SNAKE_CASE__ : str = one_third + 1
SCREAMING_SNAKE_CASE__ : Dict = two_third - 1
else:
return -1
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
if left < right:
if right - left < precision:
return lin_search(_snake_case ,_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Any = (left + right) // 3 + 1
SCREAMING_SNAKE_CASE__ : Dict = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(_snake_case ,one_third - 1 ,_snake_case ,_snake_case )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 ,_snake_case ,_snake_case ,_snake_case )
else:
return rec_ternary_search(one_third + 1 ,two_third - 1 ,_snake_case ,_snake_case )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : int = input('Enter numbers separated by comma:\n').strip()
UpperCAmelCase__ : int = [int(item.strip()) for item in user_input.split(',')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
UpperCAmelCase__ : Any = int(input('Enter the number to be found in the list:\n').strip())
UpperCAmelCase__ : Optional[Any] = ite_ternary_search(collection, target)
UpperCAmelCase__ : int = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print('Not found')
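
    # Hedged sanity check on a fixed sorted list, independent of the
    # interactive run: 7 sits at index 3 and both variants should find it.
    sample = [1, 3, 5, 7, 9, 11]
    assert ite_ternary_search(sample, 7) == 3
    assert rec_ternary_search(0, len(sample) - 1, sample, 7) == 3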
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Any = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCamelCase : Tuple = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : str = False
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
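# To exercise just this slow integration check (assumed pytest invocation and
# test-file path inside the transformers repo):
#   RUN_SLOW=1 python -m pytest tests/models/mobilebert/test_modeling_tf_mobilebert.py -k "integration"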
| 545
| 1
|
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
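# Example invocation (assumed script name; the flag names follow the args.*
# attributes used above, which InitializationArguments must declare):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name my-codeparrot --push_to_hub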
| 373
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
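# Minimal usage sketch (assumes the classes above): build a quantization-aware
# config and its ONNX export description.
#   config = IBertConfig(quant_mode=True)
#   onnx_config = IBertOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ['input_ids', 'attention_mask']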
| 373
| 1
|
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
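# For example, create_dynamic_module("diffusers_modules/git") materialises
# <HF_MODULES_CACHE>/diffusers_modules/git/__init__.py (creating the parent
# package first), so the cached module tree stays importable.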
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )
    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
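# Usage sketch (the community pipeline name is illustrative): this mirrors how
# DiffusionPipeline.from_pretrained(..., custom_pipeline=...) resolves remote code.
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
#   )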
| 718
|
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(f"""{solution() = }""")
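# Sanity check from the problem statement (Project Euler 114/115): with a
# minimum block length of 3, a row of 7 units can be filled in exactly 17 ways,
# a value the recurrence above reproduces well before the 1,000,000 threshold.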
| 500
| 0
|
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
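# Usage sketch: this formatter is what backs `Dataset.with_format("jax")` in
# `datasets`, e.g. (toy data for illustration):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#   ds[0]["x"]  # -> a jax.Array placed on the formatter's device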
| 537
|
"""simple docstring"""
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
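# Worked example for the exponent arithmetic above: meters sit at exponent 0 and
# kilometers at 3, so converting 4 meters gives 4 * 10 ** (0 - 3) = 0.004.
#   length_conversion(4, "meter", "kilometer")  # -> 0.004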
| 142
| 0
|
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
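# Worked example of the sigma = n * e * mu relation above: with conductivity
# 25 S/m and electron concentration 100 per m^3, the missing mobility is
# 25 / (100 * 1.6021e-19) ≈ 1.56e18.
#   carrier_concentration(conductivity=25, electron_conc=100, mobility=0)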
| 709
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
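# These tests lean on pytest fixtures: `tmp_path` is built in, while
# `sqlite_path` is assumed to come from the suite's conftest (it should point at
# a seeded SQLite database whose `dataset` table has 4 rows of col_1..col_3).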
| 332
| 0
|
def palindromic_string(input_string: str) -> str:
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
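# Example: the longest palindromic substring of "abbbaba" is "abbba", so
# palindromic_string("abbbaba") returns "abbba".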
| 197
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[
                            :dim
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)
    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
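# Example invocation (assumed script filename; the flags are the ones declared
# by the argparse setup above):
#   python convert_x_clip_original_pytorch_to_hf.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 --push_to_hub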
| 0
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
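# Minimal sketch (assumes the class above): the defaults follow the ConvNeXt
# "tiny" layout.
#   cfg = ConvNextV2Config()
#   cfg.hidden_sizes  # -> [96, 192, 384, 768]
#   cfg.depths        # -> [3, 3, 9, 3]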
| 705
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_2,
bloom,
bridgetower,
byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextv2,
cpm,
cpmant,
ctrl,
cvt,
data2vec,
deberta,
deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 1
| 0
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
A_ = logging.get_logger(__name__)
@add_end_docstrings(
snake_case__ , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class __lowercase ( snake_case__ ):
def __a ( self : Union[str, Any] , __lowerCamelCase : GenericTensor ) -> str:
'''simple docstring'''
if self.framework == "tf":
lowercase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowercase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__lowerCamelCase )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __a ( self : Dict , __lowerCamelCase : GenericTensor ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.get_masked_index(__lowerCamelCase )
lowercase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __a ( self : Union[str, Any] , __lowerCamelCase : GenericTensor ) -> Optional[Any]:
'''simple docstring'''
if isinstance(__lowerCamelCase , __lowerCamelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__lowerCamelCase )
def __a ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str]=None , **__lowerCamelCase : List[str] ) -> Tuple:
'''simple docstring'''
if return_tensors is None:
lowercase = self.framework
lowercase = self.tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase )
self.ensure_exactly_one_mask_token(__lowerCamelCase )
return model_inputs
def __a ( self : Any , __lowerCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase = self.model(**__lowerCamelCase )
lowercase = model_inputs["input_ids"]
return model_outputs
def __a ( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=5 , __lowerCamelCase : Optional[int]=None ) -> List[Any]:
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
lowercase = target_ids.shape[0]
lowercase = model_outputs["input_ids"][0]
lowercase = model_outputs["logits"]
if self.framework == "tf":
lowercase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowercase = outputs.numpy()
lowercase = outputs[0, masked_index, :]
lowercase = stable_softmax(__lowerCamelCase , axis=-1 )
if target_ids is not None:
lowercase = tf.gather_nd(tf.squeeze(__lowerCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
lowercase = tf.expand_dims(__lowerCamelCase , 0 )
lowercase = tf.math.top_k(__lowerCamelCase , k=__lowerCamelCase )
lowercase = topk.values.numpy(), topk.indices.numpy()
else:
lowercase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__lowerCamelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowercase = outputs[0, masked_index, :]
lowercase = logits.softmax(dim=-1 )
if target_ids is not None:
lowercase = probs[..., target_ids]
lowercase = probs.topk(__lowerCamelCase )
lowercase = []
lowercase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowercase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowercase = input_ids.numpy().copy()
if target_ids is not None:
lowercase = target_ids[p].tolist()
lowercase = p
# Filter padding out:
lowercase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowercase = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
lowercase = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(__lowerCamelCase )
result.append(__lowerCamelCase )
if single_mask:
return result[0]
return result
def __a ( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]=None ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase = [targets]
try:
lowercase = self.tokenizer.get_vocab()
except Exception:
lowercase = {}
lowercase = []
for target in targets:
lowercase = vocab.get(__lowerCamelCase , __lowerCamelCase )
if id_ is None:
lowercase = self.tokenizer(
__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , max_length=1 , truncation=__lowerCamelCase , )["input_ids"]
if len(__lowerCamelCase ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowercase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
lowercase = list(set(__lowerCamelCase ) )
if len(__lowerCamelCase ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowercase = np.array(__lowerCamelCase )
return target_ids
def __a ( self : Dict , __lowerCamelCase : Any=None , __lowerCamelCase : List[Any]=None ) -> List[str]:
'''simple docstring'''
lowercase = {}
if targets is not None:
lowercase = self.get_target_ids(__lowerCamelCase , __lowerCamelCase )
lowercase = target_ids
if top_k is not None:
lowercase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self : str , __lowerCamelCase : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase = super().__call__(__lowerCamelCase , **__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) == 1:
return outputs[0]
return outputs
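# Usage sketch for the pipeline class above, via the public transformers
# factory (the model name is only an example):
from transformers import pipeline
fill_mask = pipeline("fill-mask", model="bert-base-uncased")
print(fill_mask("Paris is the [MASK] of France.", top_k=3))
# Restricting the candidates exercises the target-id lookup path:
print(fill_mask("Paris is the [MASK] of France.", targets=["capital", "center"]))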
| 604
|
'''simple docstring'''
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
return number | (1 << position)
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
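# Worked examples for the five operations above, on 0b1010 (decimal 10),
# spelled out inline:
assert 0b1010 | (1 << 0) == 0b1011   # set bit 0
assert 0b1010 & ~(1 << 1) == 0b1000  # clear bit 1
assert 0b1010 ^ (1 << 3) == 0b0010   # flip bit 3
assert ((0b1010 >> 1) & 1) == 1      # bit 1 is set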
| 244
| 0
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
_UpperCamelCase = AutoTokenizer.from_pretrained("google/mt5-small")
_UpperCamelCase = tokenizer("Hello there" , return_tensors="np").input_ids
_UpperCamelCase = tokenizer("Hi I am" , return_tensors="np").input_ids
_UpperCamelCase = shift_tokens_right(lowercase_ , model.config.pad_token_id , model.config.decoder_start_token_id)
_UpperCamelCase = model(lowercase_ , decoder_input_ids=lowercase_).logits
_UpperCamelCase = optax.softmax_cross_entropy(lowercase_ , onehot(lowercase_ , logits.shape[-1])).mean()
_UpperCamelCase = -(labels.shape[-1] * loss.item())
_UpperCamelCase = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 705
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''dpr'''
def __init__( self : Optional[Any] , lowercase_ : int=30522 , lowercase_ : str=768 , lowercase_ : List[Any]=12 , lowercase_ : Dict=12 , lowercase_ : str=3072 , lowercase_ : Any="gelu" , lowercase_ : Any=0.1 , lowercase_ : Any=0.1 , lowercase_ : str=512 , lowercase_ : str=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Dict=1e-1_2 , lowercase_ : List[str]=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int = 0 , **lowercase_ : int , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = projection_dim
_UpperCamelCase = position_embedding_type
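# Usage sketch (assuming this class is transformers' real DPRConfig):
from transformers import DPRConfig
config = DPRConfig()
print(config.projection_dim)  # 0 -> encoder outputs are used without projection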
| 82
| 0
|
class A : # Public class to implement a graph
def __init__( self: int , _lowerCAmelCase: int , _lowerCAmelCase: int , _lowerCAmelCase: list[list[bool]] ) -> None:
'''simple docstring'''
UpperCAmelCase_ =row
UpperCAmelCase_ =col
UpperCAmelCase_ =graph
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: int , _lowerCAmelCase: list[list[bool]] ) -> bool:
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: int , _lowerCAmelCase: int , _lowerCAmelCase: list[list[bool]] ) -> None:
'''simple docstring'''
UpperCAmelCase_ =[-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
UpperCAmelCase_ =[-1, 0, 1, -1, 1, -1, 0, 1]
UpperCAmelCase_ =True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _lowerCAmelCase ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple ) -> int: # And finally, count all islands.
'''simple docstring'''
UpperCAmelCase_ =[[False for j in range(self.COL )] for i in range(self.ROW )]
UpperCAmelCase_ =0
for i in range(self.ROW ):
for j in range(self.COL ):
                if not visited[i][j] and self.graph[i][j] == 1:
self.diffs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
count += 1
return count
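# Self-contained sketch of the same island count with hypothetical descriptive
# names: an 8-directional DFS flood fill, matching the row/column offsets above.
def count_islands(grid: list[list[int]]) -> int:
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def dfs(i: int, j: int) -> None:
        if not (0 <= i < rows and 0 <= j < cols) or visited[i][j] or not grid[i][j]:
            return
        visited[i][j] = True  # mark the cell, then visit all 8 neighbours
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                if di or dj:
                    dfs(i + di, j + dj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:
                dfs(i, j)
                count += 1
    return count

assert count_islands([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) == 1  # diagonals connect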
| 54
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__lowercase : List[Any] =logging.get_logger(__name__)
class A ( __lowercase ):
def __init__( self: List[Any] , *_lowerCAmelCase: Optional[Any] , **_lowerCAmelCase: List[str] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 54
| 1
|
from math import sqrt
def lowercase_( SCREAMING_SNAKE_CASE_ = 1000000 ):
'''simple docstring'''
lowerCamelCase : int = 0
lowerCamelCase : int = 0
lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(SCREAMING_SNAKE_CASE_ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
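# Why sqrt(sum_shortest_sides**2 + max_cuboid_size**2) above: for a cuboid
# a x b x c with a <= b <= c, the shortest surface path between opposite
# corners unfolds to the hypotenuse sqrt((a + b)**2 + c**2), so candidate
# cuboids can be grouped by the sum of the two shortest sides and the longest
# side. Example: the 6 x 5 x 3 cuboid gives sqrt((3 + 5)**2 + 6**2) = 10,
# an integer shortest path.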
| 231
|
from __future__ import annotations
import numpy as np
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase : Dict = np.shape(SCREAMING_SNAKE_CASE_ )
if rows != columns:
lowerCamelCase : int = (
"'table' has to be of square shaped array but got a "
f"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict = np.zeros((rows, columns) )
lowerCamelCase : List[str] = np.zeros((rows, columns) )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Dict = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
lowerCamelCase : Dict = (table[i][j] - total) / upper[j][j]
lowerCamelCase : Dict = 1
for j in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : int = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase : Any = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
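# Worked check for the Doolittle factorization above (L has a unit diagonal):
# A = [[2, -2, 1],
#      [0,  1, 2],
#      [5,  3, 1]]
# factors as L = [[1, 0, 0], [0, 1, 0], [2.5, 8, 1]] and
# U = [[2, -2, 1], [0, 1, 2], [0, 0, -17.5]],
# and numpy.allclose(L @ U, A) holds.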
| 231
| 1
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__magic_name__: int = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
__magic_name__: List[Any] = 10
__magic_name__: List[Any] = 256
def UpperCamelCase ( _A ):
"""simple docstring"""
if len(_A ) < MIN_NUM_TOKENS:
return None
__magic_name__ : Optional[int] = MinHash(num_perm=_A )
for token in set(_A ):
min_hash.update(token.encode() )
return min_hash
def UpperCamelCase ( _A ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(_A ) if len(t.strip() ) > 0}
class snake_case__ :
def __init__( self , *,
lowerCAmelCase__ = 0.8_5 , ) -> str:
__magic_name__ : List[str] = duplication_jaccard_threshold
__magic_name__ : str = NUM_PERM
__magic_name__ : str = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
__magic_name__ : Tuple = defaultdict(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
__magic_name__ : Optional[int] = self._index.query(lowerCAmelCase__ )
if code_key in self._index.keys:
print(F'Duplicate key {code_key}' )
return
self._index.insert(lowerCAmelCase__ , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowerCAmelCase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[List[Dict]]:
__magic_name__ : List[str] = []
for base, duplicates in self._duplicate_clusters.items():
__magic_name__ : List[str] = [base] + list(lowerCAmelCase__ )
# reformat the cluster to be a list of dict
__magic_name__ : List[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowerCAmelCase__ )
return duplicate_clusters
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : str = self.get_duplicate_clusters()
with open(lowerCAmelCase__ , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ ,__magic_name__ : Optional[int] = element
__magic_name__ : str = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( _A ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash, ThreadedIterator(_A, max_queue_size=10000 ), chunksize=100, ):
if data is not None:
yield data
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=_A )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_A ) ), max_queue_size=100 ) ):
di.add(_A, _A )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Optional[int] = get_tokens(_A )
__magic_name__ : int = get_tokens(_A )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
__magic_name__: List[str] = None
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Tuple = []
for elementa in cluster:
__magic_name__ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
__magic_name__ : Optional[int] = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(_A, _A ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__magic_name__ : Union[str, Any] = 1
extremes.append(_A )
return extremes
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
global _shared_dataset
__magic_name__ : List[str] = dataset
__magic_name__ : str = []
__magic_name__ : int = partial(_find_cluster_extremes_shared, jaccard_threshold=_A )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_A, _A, ), total=len(_A ), ):
extremes_list.append(_A )
return extremes_list
def UpperCamelCase ( _A, _A = 0.85 ):
"""simple docstring"""
__magic_name__ : Tuple = make_duplicate_clusters(_A, _A )
__magic_name__ : int = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
__magic_name__ : str = {}
__magic_name__ : int = find_extremes(_A, _A, _A )
for extremes in extremes_clusters:
for element in extremes:
__magic_name__ : Dict = element
__magic_name__ : Optional[Any] = duplicate_indices - set(extreme_dict.keys() )
__magic_name__ : List[Any] = dataset.filter(lambda _A, _A : idx not in remove_indices, with_indices=_A )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__magic_name__ : Union[str, Any] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
__magic_name__ : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'Original dataset size: {len(_A )}' )
print(f'Number of duplicate clusters: {len(_A )}' )
print(f'Files in duplicate cluster: {len(_A )}' )
print(f'Unique files in duplicate cluster: {len(_A )}' )
print(f'Filtered dataset size: {len(_A )}' )
return ds_filter, duplicate_clusters
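# Minimal sketch of the MinHash machinery used above, with datasketch's real
# API: two near-duplicate token sets get a high estimated Jaccard similarity.
from datasketch import MinHash

def _minhash_of(tokens):  # hypothetical helper name
    m = MinHash(num_perm=256)
    for t in tokens:
        m.update(t.encode())
    return m

a = _minhash_of({"def", "foo", "return", "x"})
b = _minhash_of({"def", "foo", "return", "y"})
print(a.jaccard(b))  # close to the true Jaccard of 3/5 = 0.6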
| 324
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_lowerCAmelCase )
class snake_case__ ( _lowerCAmelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowercase__ : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowercase__ : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
lowercase__ : ClassVar[Features] = Features({'''labels''': ClassLabel} )
lowercase__ : str = "text"
lowercase__ : str = "labels"
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[str]:
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , lowerCAmelCase__ ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
__magic_name__ : Union[str, Any] = copy.deepcopy(self )
__magic_name__ : Optional[int] = self.label_schema.copy()
__magic_name__ : Union[str, Any] = features[self.label_column]
__magic_name__ : int = label_schema
return task_template
@property
def __magic_name__ ( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
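# Usage sketch: aligning a dataset schema with this template, assuming it is
# datasets' real TextClassification task template.
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels").align_with_features(features)
print(task.label_schema["labels"].names)  # ['neg', 'pos']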
| 324
| 1
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def _SCREAMING_SNAKE_CASE ( __lowercase : Tuple ) -> Optional[int]:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def _SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
__A = """mock-s3-bucket"""
__A = f"s3://{mock_bucket}"
__A = extract_path_from_uri(__lowercase )
assert dataset_path.startswith("""s3://""" ) is False
__A = """./local/path"""
__A = extract_path_from_uri(__lowercase )
assert dataset_path == new_dataset_path
def _SCREAMING_SNAKE_CASE ( __lowercase : List[Any] ) -> int:
"""simple docstring"""
__A = is_remote_filesystem(__lowercase )
assert is_remote is True
__A = fsspec.filesystem("""file""" )
__A = is_remote_filesystem(__lowercase )
assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , __lowercase )
def _SCREAMING_SNAKE_CASE ( __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__A = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
__A = input_paths[compression_fs_class.protocol]
if input_path is None:
__A = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__lowercase )
__A = fsspec.filesystem(compression_fs_class.protocol , fo=__lowercase )
assert isinstance(__lowercase , __lowercase )
__A = os.path.basename(__lowercase )
__A = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(__lowercase , """r""" , encoding="""utf-8""" ) as f, open(__lowercase , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def _SCREAMING_SNAKE_CASE ( __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__A = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
__A = compressed_file_paths[protocol]
__A = """dataset.jsonl"""
__A = f"{protocol}://{member_file_path}::{compressed_file_path}"
__A , *__A = fsspec.get_fs_token_paths(__lowercase )
assert fs.isfile(__lowercase )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def _SCREAMING_SNAKE_CASE ( __lowercase : List[Any] , __lowercase : Dict , __lowercase : Tuple , __lowercase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__A = hf_api.dataset_info(__lowercase , token=__lowercase )
__A = HfFileSystem(repo_info=__lowercase , token=__lowercase )
assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"]
assert hffs.isdir("""data""" )
assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" )
with open(__lowercase ) as f:
assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read()
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
__A = """bz2"""
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__lowercase , __lowercase , clobber=__lowercase )
with pytest.warns(__lowercase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__lowercase ) == 1
assert (
str(warning_info[0].message )
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
| 199
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _SCREAMING_SNAKE_CASE ( __lowercase : Any ) -> Optional[int]:
"""simple docstring"""
return EnvironmentCommand()
class __lowercase ( lowercase_ ):
'''simple docstring'''
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase_ : ArgumentParser ):
"""simple docstring"""
__A = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase_ )
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
__A = huggingface_hub.__version__
__A = """not installed"""
__A = """NA"""
if is_torch_available():
import torch
__A = torch.__version__
__A = torch.cuda.is_available()
__A = """not installed"""
if is_transformers_available():
import transformers
__A = transformers.__version__
__A = """not installed"""
if is_accelerate_available():
import accelerate
__A = accelerate.__version__
__A = """not installed"""
if is_xformers_available():
import xformers
__A = xformers.__version__
__A = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": F"{pt_version} ({pt_cuda_available})",
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase_ ) )
return info
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase_ : str ):
"""simple docstring"""
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 199
| 1
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
snake_case : List[Any] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
a :int = _TestCommandArgs(dataset=UpperCAmelCase_ , all_configs=UpperCAmelCase_ , save_infos=UpperCAmelCase_ )
a :List[Any] = TestCommand(*UpperCAmelCase_ )
test_command.run()
a :List[str] = os.path.join(UpperCAmelCase_ , '''README.md''' )
assert os.path.exists(UpperCAmelCase_ )
a :Any = DatasetInfosDict.from_directory(UpperCAmelCase_ )
a :Optional[Any] = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 235_1563,
'''num_examples''': 1_0000,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_8418,
'''num_examples''': 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
a , a :Optional[Any] = getattr(dataset_infos['''default'''] , UpperCAmelCase_ ), getattr(expected_dataset_infos['''default'''] , UpperCAmelCase_ )
if key == "num_bytes":
assert is_apercent_close(UpperCAmelCase_ , UpperCAmelCase_ )
elif key == "splits":
assert list(UpperCAmelCase_ ) == list(UpperCAmelCase_ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 445
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Optional[int] = logging.get_logger(__name__)
snake_case : Optional[Any] = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = 'wavlm'
def __init__( self , _lowerCamelCase=32 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase="group" , _lowerCamelCase="gelu" , _lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , _lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , _lowerCamelCase=False , _lowerCamelCase=128 , _lowerCamelCase=16 , _lowerCamelCase=320 , _lowerCamelCase=800 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.05 , _lowerCamelCase=10 , _lowerCamelCase=2 , _lowerCamelCase=0.0 , _lowerCamelCase=10 , _lowerCamelCase=320 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=100 , _lowerCamelCase=256 , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase="mean" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=256 , _lowerCamelCase=(512, 512, 512, 512, 1500) , _lowerCamelCase=(5, 3, 3, 1, 1) , _lowerCamelCase=(1, 2, 3, 1, 1) , _lowerCamelCase=512 , _lowerCamelCase=80 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=False , _lowerCamelCase=3 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=None , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase )
a :Union[str, Any] = hidden_size
a :List[Any] = feat_extract_norm
a :Tuple = feat_extract_activation
a :Tuple = list(_lowerCamelCase )
a :Optional[Any] = list(_lowerCamelCase )
a :Optional[Any] = list(_lowerCamelCase )
a :int = conv_bias
a :Tuple = num_buckets
a :Tuple = max_bucket_distance
a :int = num_conv_pos_embeddings
a :List[str] = num_conv_pos_embedding_groups
a :Tuple = len(self.conv_dim )
a :Any = num_hidden_layers
a :Tuple = intermediate_size
a :Any = hidden_act
a :List[str] = num_attention_heads
a :int = hidden_dropout
a :Optional[Any] = attention_dropout
a :Dict = activation_dropout
a :Any = feat_proj_dropout
a :Dict = final_dropout
a :int = layerdrop
a :Tuple = layer_norm_eps
a :Union[str, Any] = initializer_range
a :Union[str, Any] = num_ctc_classes
a :Optional[Any] = vocab_size
a :str = do_stable_layer_norm
a :Dict = use_weighted_layer_sum
a :Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a :List[Any] = apply_spec_augment
a :Optional[int] = mask_time_prob
a :Optional[Any] = mask_time_length
a :Tuple = mask_time_min_masks
a :Dict = mask_feature_prob
a :List[Any] = mask_feature_length
# parameters for pretraining with codevector quantized representations
a :Tuple = num_codevectors_per_group
a :List[str] = num_codevector_groups
a :List[Any] = contrastive_logits_temperature
a :Union[str, Any] = num_negatives
a :Union[str, Any] = codevector_dim
a :Dict = proj_codevector_dim
a :Union[str, Any] = diversity_loss_weight
# ctc loss
a :Dict = ctc_loss_reduction
a :Optional[int] = ctc_zero_infinity
# adapter
a :Any = add_adapter
a :str = adapter_kernel_size
a :Tuple = adapter_stride
a :Dict = num_adapter_layers
a :Any = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a :Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a :List[Any] = list(_lowerCamelCase )
a :Optional[int] = list(_lowerCamelCase )
a :List[str] = list(_lowerCamelCase )
a :Optional[int] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 445
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase_ : Optional[Any] = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
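# In the real transformers layout, that last assignment replaces the module in
# sys.modules so attribute access triggers the lazy imports declared above:
#   sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)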
| 670
|
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
while second != 0:
UpperCamelCase_: Optional[Any] = first & second
first ^= second
UpperCamelCase_: Any = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : List[Any] = int(input("""Enter the first number: """).strip())
lowerCamelCase_ : Tuple = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
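# Worked trace of the carry loop above for 5 + 3, reading the collapsed
# assignments as carry = first & second and second = carry << 1:
#   first=0b0101, second=0b0011 -> carry=0b0001, first=0b0110, second=0b0010
#   first=0b0110, second=0b0010 -> carry=0b0010, first=0b0100, second=0b0100
#   first=0b0100, second=0b0100 -> carry=0b0100, first=0b0000, second=0b1000
#   first=0b0000, second=0b1000 -> carry=0b0000, first=0b1000, second=0b0000
# The loop ends and first == 0b1000 == 8.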
| 670
| 1
|
import operator as op
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
__snake_case : List[str] = []
__snake_case : Optional[int] = lambda __UpperCAmelCase , __UpperCAmelCase : int(x / y ) # noqa: E731 integer division operation
__snake_case : int = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
print('-' * (30 + len(__UpperCAmelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__UpperCAmelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' )
else:
__snake_case : Optional[int] = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' )
__snake_case : List[Any] = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' )
stack.append(
str(opr[x](int(__UpperCAmelCase ) , int(__UpperCAmelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' , )
return int(stack[0] )
if __name__ == "__main__":
__magic_name__ = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
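# Worked example for the evaluator above: "5 6 9 * +" pushes 5, 6 and 9,
# then '*' pops 9 and 6 (6 * 9 = 54) and '+' pops 54 and 5 (5 + 54 = 59).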
| 576
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "bert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Dict = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : Any = intermediate_size
__snake_case : List[str] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : str = type_vocab_size
__snake_case : Any = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : List[Any] = position_embedding_type
__snake_case : Dict = use_cache
__snake_case : str = classifier_dropout
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
@property
def lowercase_ ( self ):
if self.task == "multiple-choice":
__snake_case : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case : Tuple = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
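# Usage sketch: the dynamic-axes mapping above is what transformers' ONNX
# exporter consumes, e.g. via the documented CLI:
#   python -m transformers.onnx --model=bert-base-uncased onnx/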
| 576
| 1
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = 42
@flax_register_to_config
class snake_case__ ( nn.Module , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = 32
A__ = 4
A__ = 4
A__ = (
'''CrossAttnDownBlock2D''',
'''CrossAttnDownBlock2D''',
'''CrossAttnDownBlock2D''',
'''DownBlock2D''',
)
A__ = ('''UpBlock2D''', '''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''')
A__ = False
A__ = (320, 640, 1_280, 1_280)
A__ = 2
A__ = 8
A__ = None
A__ = 1_280
A__ = 0.0
A__ = False
A__ = jnp.floataa
A__ = True
A__ = 0
A__ = False
def A_ ( self : Optional[int] , __a : jax.random.KeyArray ) -> FrozenDict:
'''simple docstring'''
__snake_case : List[str] = (1, self.in_channels, self.sample_size, self.sample_size)
__snake_case : str = jnp.zeros(__a , dtype=jnp.floataa )
__snake_case : List[str] = jnp.ones((1,) , dtype=jnp.intaa )
__snake_case : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__snake_case : Optional[Any] = jax.random.split(__a )
__snake_case : Union[str, Any] = {'params': params_rng, 'dropout': dropout_rng}
return self.init(__a , __a , __a , __a )["params"]
def A_ ( self : str ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[int] = self.block_out_channels
__snake_case : str = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__snake_case : Dict = self.num_attention_heads or self.attention_head_dim
# input
__snake_case : str = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__snake_case : Any = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__snake_case : Any = FlaxTimestepEmbedding(__a , dtype=self.dtype )
__snake_case : Any = self.only_cross_attention
if isinstance(__a , __a ):
__snake_case : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__a , __a ):
__snake_case : List[str] = (num_attention_heads,) * len(self.down_block_types )
# down
__snake_case : Any = []
__snake_case : int = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__snake_case : str = output_channel
__snake_case : int = block_out_channels[i]
__snake_case : Optional[Any] = i == len(__a ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__snake_case : str = FlaxCrossAttnDownBlockaD(
in_channels=__a , out_channels=__a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__snake_case : Tuple = FlaxDownBlockaD(
in_channels=__a , out_channels=__a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__a )
__snake_case : int = down_blocks
# mid
__snake_case : str = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__snake_case : str = []
__snake_case : Any = list(reversed(__a ) )
__snake_case : Optional[Any] = list(reversed(__a ) )
__snake_case : Dict = list(reversed(__a ) )
__snake_case : str = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__snake_case : Dict = output_channel
__snake_case : Any = reversed_block_out_channels[i]
__snake_case : List[Any] = reversed_block_out_channels[min(i + 1 , len(__a ) - 1 )]
__snake_case : Union[str, Any] = i == len(__a ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__snake_case : List[str] = FlaxCrossAttnUpBlockaD(
in_channels=__a , out_channels=__a , prev_output_channel=__a , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__snake_case : Dict = FlaxUpBlockaD(
in_channels=__a , out_channels=__a , prev_output_channel=__a , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__a )
__snake_case : Dict = output_channel
__snake_case : List[str] = up_blocks
# out
__snake_case : int = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__snake_case : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[Any] , __a : List[str] , __a : Union[str, Any] , __a : int , __a : List[Any]=None , __a : Tuple=None , __a : bool = True , __a : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
# 1. time
if not isinstance(__a , jnp.ndarray ):
__snake_case : Optional[int] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__a , jnp.ndarray ) and len(timesteps.shape ) == 0:
__snake_case : Any = timesteps.astype(dtype=jnp.floataa )
__snake_case : List[str] = jnp.expand_dims(__a , 0 )
__snake_case : List[str] = self.time_proj(__a )
__snake_case : Tuple = self.time_embedding(__a )
# 2. pre-process
__snake_case : List[Any] = jnp.transpose(__a , (0, 2, 3, 1) )
__snake_case : Dict = self.conv_in(__a )
# 3. down
__snake_case : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(__a , __a ):
__snake_case : Dict = down_block(__a , __a , __a , deterministic=not train )
else:
__snake_case : Dict = down_block(__a , __a , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__snake_case : List[str] = ()
for down_block_res_sample, down_block_additional_residual in zip(
__a , __a ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__snake_case : Optional[int] = new_down_block_res_samples
# 4. mid
__snake_case : Tuple = self.mid_block(__a , __a , __a , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__snake_case : Tuple = down_block_res_samples[-(self.layers_per_block + 1) :]
__snake_case : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__a , __a ):
__snake_case : int = up_block(
__a , temb=__a , encoder_hidden_states=__a , res_hidden_states_tuple=__a , deterministic=not train , )
else:
__snake_case : Dict = up_block(__a , temb=__a , res_hidden_states_tuple=__a , deterministic=not train )
# 6. post-process
__snake_case : Any = self.conv_norm_out(__a )
__snake_case : Optional[int] = nn.silu(__a )
__snake_case : List[str] = self.conv_out(__a )
__snake_case : Optional[int] = jnp.transpose(__a , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__a )
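# Initialization sketch, assuming the class is exposed as diffusers'
# FlaxUNet2DConditionModel (a deliberately tiny configuration):
import jax
from diffusers import FlaxUNet2DConditionModel

unet = FlaxUNet2DConditionModel(
    sample_size=32,
    block_out_channels=(32, 64),
    layers_per_block=1,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    cross_attention_dim=64,
    attention_head_dim=2,
)
params = unet.init_weights(jax.random.PRNGKey(0))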
| 705
|
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self) -> None:
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path: str) -> None:
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path: str) -> None:
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_framework_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True)
        mock_torch = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf), patch(
            "transformers.onnx.features.is_torch_available", mock_torch
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False)
        mock_torch = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf), patch(
            "transformers.onnx.features.is_torch_available", mock_torch
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
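# A minimal usage sketch (not part of the test suite; the checkpoint name is an
# assumption for illustration): resolving the framework before driving an export.
#
# from transformers.onnx import FeaturesManager
# framework = FeaturesManager.determine_framework("distilbert-base-uncased")
# print(framework)  # "pt" or "tf", depending on what is installed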
| 124
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")

        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
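# A minimal illustrative sketch (not from the test file) of resolving and applying
# an activation by name; assumes torch and transformers are installed.
#
# import torch
# from transformers.activations import get_activation
# act = get_activation("gelu")
# print(act(torch.linspace(-3.0, 3.0, 7)))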
| 179
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCamelCase = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
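# A minimal usage sketch (illustrative only; the input image is synthetic):
#
# from PIL import Image
# processor = ConvNextImageProcessor(size={"shortest_edge": 384})
# batch = processor(images=Image.new("RGB", (512, 512)), return_tensors="np")
# print(batch["pixel_values"].shape)  # (1, 3, 384, 384)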
| 179
| 1
|
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with three fixed priority levels; 0 is the highest priority."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue where the smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
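# Note: ElementPriorityQueue.dequeue scans the whole list, so each dequeue is O(n).
# A sketch of the same behaviour with O(log n) operations via the standard library:
#
# import heapq
# heap: list[int] = []
# for value in (70, 10, 128):
#     heapq.heappush(heap, value)
# assert heapq.heappop(heap) == 10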
| 405
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
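# A minimal sketch of what the lazy module enables (illustrative, not part of
# this file; assumes transformers is installed):
#
# from transformers.models.distilbert import DistilBertConfig
# config = DistilBertConfig()  # submodule is imported only on first attribute access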
| 405
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Optional[int] = ShapEImgaImgPipeline
__snake_case : int = ["image"]
__snake_case : List[Any] = ["image"]
__snake_case : Optional[int] = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__snake_case : Union[str, Any] = False
@property
def A__ ( self ):
return 32
@property
def A__ ( self ):
return 32
@property
def A__ ( self ):
return self.time_input_dim * 4
@property
def A__ ( self ):
return 8
@property
def A__ ( self ):
torch.manual_seed(0 )
_A : int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
_A : List[Any] = CLIPVisionModel(A__ )
return model
@property
def A__ ( self ):
_A : Union[str, Any] = CLIPImageProcessor(
crop_size=224 ,do_center_crop=A__ ,do_normalize=A__ ,do_resize=A__ ,image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] ,image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] ,resample=3 ,size=224 ,)
return image_processor
@property
def A__ ( self ):
torch.manual_seed(0 )
_A : Any = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_A : Union[str, Any] = PriorTransformer(**A__ )
return model
@property
def A__ ( self ):
torch.manual_seed(0 )
_A : Optional[int] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_A : str = ShapERenderer(**A__ )
return model
def A__ ( self ):
_A : Dict = self.dummy_prior
_A : Optional[Any] = self.dummy_image_encoder
_A : Tuple = self.dummy_image_processor
_A : Union[str, Any] = self.dummy_renderer
_A : Optional[Any] = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1024 ,prediction_type='''sample''' ,use_karras_sigmas=A__ ,clip_sample=A__ ,clip_sample_range=1.0 ,)
_A : Any = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def A__ ( self ,A__ ,A__=0 ):
_A : Tuple = floats_tensor((1, 3, 64, 64) ,rng=random.Random(A__ ) ).to(A__ )
if str(A__ ).startswith('''mps''' ):
_A : int = torch.manual_seed(A__ )
else:
_A : List[Any] = torch.Generator(device=A__ ).manual_seed(A__ )
_A : Tuple = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def A__ ( self ):
_A : Tuple = '''cpu'''
_A : Optional[Any] = self.get_dummy_components()
_A : str = self.pipeline_class(**A__ )
_A : List[Any] = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
_A : Union[str, Any] = pipe(**self.get_dummy_inputs(A__ ) )
_A : List[str] = output.images[0]
_A : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_A : Optional[int] = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A__ ( self ):
_A : Union[str, Any] = torch_device == '''cpu'''
_A : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=A__ ,relax_max_difference=A__ ,)
def A__ ( self ):
_A : List[str] = self.get_dummy_components()
_A : Dict = self.pipeline_class(**A__ )
_A : List[str] = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
_A : Dict = 1
_A : int = 2
_A : str = self.get_dummy_inputs(A__ )
for key in inputs.keys():
if key in self.batch_params:
_A : List[Any] = batch_size * [inputs[key]]
_A : int = pipe(**A__ ,num_images_per_prompt=A__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def A__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
_A : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
_A : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
_A : Dict = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
_A : Union[str, Any] = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
_A : Any = torch.Generator(device=A__ ).manual_seed(0 )
_A : Optional[int] = pipe(
A__ ,generator=A__ ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='''np''' ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(A__ ,A__ )
| 206
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by adding `level` to every channel value."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
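# Note: Image.point applies `brightness` to every channel value and clamps the
# result to [0, 255] for 8-bit modes. A quick sanity check on a synthetic pixel
# (hypothetical values, not from the original script):
#
# assert change_brightness(Image.new("L", (1, 1), 100), 50).getpixel((0, 0)) == 150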
| 206
| 1
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main() -> None:
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
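# A hypothetical invocation (the script name, output path, and checkpoints are
# placeholders; HfArgumentParser derives the flags from the ModelArguments fields):
#
# python create_model_from_encoder_decoder_models.py \
#     --output_dir ./vit-gpt2 \
#     --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#     --decoder_model_name_or_path gpt2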
| 701
|
from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Compute beta = v / c, validating the input speed."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Compute the Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the boost to an event four-vector; defaults to the symbolic vector."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
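# A quick numeric check (assumed test speed of 0.1c): gamma should be
# 1 / sqrt(1 - 0.1**2) ≈ 1.00504.
#
# assert abs(gamma(29_979_245.8) - 1.00504) < 1e-5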
| 441
| 0
|
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # tf.subtract is the current name of the elementwise difference op
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
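# A minimal driver sketch (assumes the TF1-style API above is available, e.g. via
# `import tensorflow.compat.v1 as tf` with eager execution disabled):
#
# import numpy as np
# data = [vec for vec in np.random.rand(50, 2).astype("float64")]
# centroids, assignments = tf_k_means_cluster(data, 3)
# print(centroids)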
| 507
|
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon search results for `product` into a pandas DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # (Reconstructed from context) blank out the price columns on rows where the
    # scraped price exceeds the MRP, which indicates a bogus match.
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "MRP of the product",
    ] = " "
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "Current Price of the product",
    ] = " "
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"

    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
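# Note: scraping depends on Amazon's current markup and rate limits; a defensive
# variant might bound the request itself (sketch with a hypothetical timeout):
#
# response = requests.get(url, headers=header, timeout=10)
# response.raise_for_status()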
| 507
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
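# A minimal usage sketch (hyperparameters here are illustrative, not a released
# checkpoint):
#
# config = Swin2SRConfig(upscale=4)
# print(config.num_layers, config.upscale)  # 6 4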
| 708
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = 13 , _lowerCAmelCase = 64 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3 , _lowerCAmelCase = 3 , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 128 , _lowerCAmelCase=[16, 32, 64, 128] , _lowerCAmelCase = 7 , _lowerCAmelCase = 4 , _lowerCAmelCase = 37 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 10 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 128 , _lowerCAmelCase = [2, 2, 2, 2] , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , ):
a =parent
a =batch_size
a =image_size
a =patch_size
a =num_channels
a =is_training
a =use_labels
a =hidden_size
a =num_hidden_layers
a =num_attention_heads
a =intermediate_size
a =hidden_act
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =type_sequence_label_size
a =initializer_range
a =encoder_stride
a =num_attention_outputs
a =embed_dim
a =embed_dim + 1
a =resolution
a =depths
a =hidden_sizes
a =dim
a =mlp_expansion_ratio
def lowerCAmelCase__ ( self ):
a =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a =None
if self.use_labels:
a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
a =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
a =TFEfficientFormerModel(config=_lowerCAmelCase )
a =model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
a =self.type_sequence_label_size
a =TFEfficientFormerForImageClassification(_lowerCAmelCase )
a =model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a =1
a =TFEfficientFormerForImageClassification(_lowerCAmelCase )
a =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a =model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self ):
a =self.prepare_config_and_inputs()
a , a , a =config_and_inputs
a ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : str = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_SCREAMING_SNAKE_CASE : Any = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE : int = False
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : int = False
def lowerCAmelCase__ ( self ):
a =TFEfficientFormerModelTester(self )
a =ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def lowerCAmelCase__ ( self ):
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(_lowerCAmelCase )
a =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a =[*signature.parameters.keys()]
a =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self ):
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
a =model_class(_lowerCAmelCase )
a =model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
a =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a =getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
a =self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
a =seq_length * self.model_tester.chunk_length
else:
a =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
a =outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
a =getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
a =getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
a =super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__ ( self ):
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCAmelCase__ ( self ):
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def lowerCAmelCase__ ( self ):
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a =TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCAmelCase__ ( self ):
a , a =self.model_tester.prepare_config_and_inputs_for_common()
a =True
a =getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
a =getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
a =getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
a =getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
a =encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
a =True
a =False
a =True
a =model_class(_lowerCAmelCase )
a =model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
a =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a =True
a =model_class(_lowerCAmelCase )
a =model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
a =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase__ ( self ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
a =model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
a ={
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
a =model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase ( )-> str:
"""simple docstring"""
a =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self ):
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self ):
a =TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
a =self.default_image_processor
a =prepare_img()
a =image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
a =model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
a =tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
a =tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self ):
a =TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
a =self.default_image_processor
a =prepare_img()
a =image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
a =model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
a =tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
a =tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 321
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : str , __A : Tuple=7 , __A : List[str]=3 , __A : Optional[Any]=1_8 , __A : int=3_0 , __A : Any=4_0_0 , __A : str=True , __A : Dict=None , __A : List[str]=True , __A : List[Any]=False , __A : int=True , __A : Dict=True , __A : Any=[0.5, 0.5, 0.5] , __A : Optional[int]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
_lowercase = parent
_lowercase = batch_size
_lowercase = num_channels
_lowercase = image_size
_lowercase = min_resolution
_lowercase = max_resolution
_lowercase = do_resize
_lowercase = size if size is not None else {"height": 1_8, "width": 2_0}
_lowercase = do_thumbnail
_lowercase = do_align_axis
_lowercase = do_pad
_lowercase = do_normalize
_lowercase = image_mean
_lowercase = image_std
def snake_case ( self : List[str] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = DonutImageProcessor if is_vision_available() else None
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = DonutImageProcessingTester(self )
@property
def snake_case ( self : str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
self.assertTrue(hasattr(__A , "do_thumbnail" ) )
self.assertTrue(hasattr(__A , "do_align_long_axis" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 1_8, "width": 2_0} )
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
# Previous config had dimensions in (width, height) order
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=(4_2, 8_4) )
self.assertEqual(image_processor.size , {"height": 8_4, "width": 4_2} )
def snake_case ( self : Dict ):
"""simple docstring"""
pass
@is_flaky()
def snake_case ( self : Optional[int] ):
"""simple docstring"""
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
_lowercase = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def snake_case ( self : Tuple ):
"""simple docstring"""
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
_lowercase = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def snake_case ( self : Dict ):
"""simple docstring"""
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
_lowercase = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 497
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int, power: int) -> list[str]:
    """Return the first `nth_term` terms of the P-Series 1 + 1/2^p + 1/3^p + ..."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ : Any = int(input('''Enter the last number (nth term) of the P-Series'''))
__magic_name__ : Dict = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
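# Worked example of the series construction above:
#
# assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]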
| 497
| 1
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every combination of words from `word_bank` that concatenates to `target`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 139
|
def equation(x: float) -> float:
    '''simple docstring'''
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    '''simple docstring'''
    # Bolzano condition: a sign change is required for a root to exist in [a, b]
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
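# Illustrative check (a sketch; equation(x) = 10 - x*x has roots at +/- sqrt(10)):
# >>> round(bisection(-2, 5), 2)
# 3.16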
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 139
| 1
|
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    '''simple docstring'''
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
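# Illustrative check (a sketch): lowercase letters may be capitalized or dropped,
# so "daBcd" abbreviates to "ABC":
# >>> abbr("daBcd", "ABC")
# True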
if __name__ == "__main__":
import doctest
doctest.testmod()
| 532
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
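# Illustrative usage (a sketch; assumes the config class above is importable):
# >>> config = FNetConfig(hidden_size=512)
# >>> config.num_hidden_layers   # unspecified fields keep their defaults
# 12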
| 532
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703
|
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """simple docstring"""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
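# Illustrative check (a sketch, not part of the original file):
# >>> prime_sieve_eratosthenes(10)
# [2, 3, 5, 7]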
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 274
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
A = """pytorch_model.bin"""
A = """pytorch_model.bin.index.json"""
A = """adapter_config.json"""
A = """adapter_model.bin"""
A = """adapter_model.safetensors"""
A = """tf_model.h5"""
A = """tf_model.h5.index.json"""
A = """model.ckpt"""
A = """flax_model.msgpack"""
A = """flax_model.msgpack.index.json"""
A = """model.safetensors"""
A = """model.safetensors.index.json"""
A = """config.json"""
A = """preprocessor_config.json"""
A = FEATURE_EXTRACTOR_NAME
A = """generation_config.json"""
A = """modelcard.json"""
A = """▁"""
A = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
A = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
A = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
A = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """simple docstring"""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers.")
| 77
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """simple docstring"""
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        """simple docstring"""
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        """simple docstring"""
        pass

    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 77
| 1
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    """simple docstring"""

    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 10_24,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 10_24)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """simple docstring"""

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 702
|
def jaro_winkler(str1: str, str2: str) -> float:
    '''simple docstring'''

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
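# Illustrative checks (a sketch; "martha"/"marhta" is the classic textbook pair,
# which should evaluate to roughly 0.9611):
# >>> round(jaro_winkler("martha", "marhta"), 4)
# 0.9611
# >>> jaro_winkler("hello", "hello")
# 1.0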
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 662
| 0
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        '''simple docstring'''
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self: Optional[Any] ) -> str:
'''simple docstring'''
return str(self )
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc: tuple[int, int]) -> Any:
        '''simple docstring'''
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        '''simple docstring'''
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        '''simple docstring'''
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        '''simple docstring'''
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        '''simple docstring'''
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
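# For reference (a sketch of the math the method above implements): with A^(-1)
# stored in `self`, the Sherman-Morrison formula computes
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# in O(n^2), instead of re-inverting the rank-1-updated matrix from scratch.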
# Testing
if __name__ == "__main__":
    def test1() -> None:
        '''simple docstring'''
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        '''simple docstring'''
        import doctest

        doctest.testmod()

    test2()
| 54
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
__lowercase : str ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
__lowercase : Any ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    '''simple docstring'''
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    '''simple docstring'''
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 2_5_5).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
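# Illustrative usage (a sketch, not part of the original module):
# >>> import numpy as np
# >>> frames = np.random.rand(2, 64, 64, 3)   # batch of HWC float arrays in [0, 1]
# >>> pil_images = numpy_to_pil(frames)
# >>> len(pil_images), pil_images[0].size
# (2, (64, 64))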
| 54
| 1
|
'''simple docstring'''
class SegmentTree:
    def __init__(self, size: int) -> None:
        """simple docstring"""
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        """simple docstring"""
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        """simple docstring"""
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """simple docstring"""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """simple docstring"""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
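# Illustrative usage (a sketch; `query` treats `right` as exclusive):
# >>> st = SegmentTree(8)
# >>> st.update(2, 5)
# >>> st.query(0, 4)
# 5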
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        """simple docstring"""
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        """simple docstring"""
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        """simple docstring"""
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        """simple docstring"""
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        """simple docstring"""
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        """simple docstring"""
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
| 50
| 1
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    '''simple docstring'''

    def _get_tensors(self, length):
        """simple docstring"""
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        """simple docstring"""
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        """simple docstring"""
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        """simple docstring"""
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        """simple docstring"""
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        """simple docstring"""
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
| 293
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        '''simple docstring'''
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node):
        '''simple docstring'''
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        '''simple docstring'''
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        '''simple docstring'''
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case = time.time()
__snake_case = AStar(init, goal)
__snake_case = a_star.search()
__snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__snake_case = time.time()
__snake_case = BidirectionalAStar(init, goal)
__snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 1
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 324
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b) -> float:
    """simple docstring"""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """simple docstring"""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
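# Illustrative check of the distance helper (a sketch, not part of the original file):
# >>> euclidean_distance([0, 0], [3, 4])
# 5.0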
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 324
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |   8  |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # the SentencePieceProcessor itself is not picklable
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
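    # Example: with prefix=[src_lang_code] and suffix=[eos] (the non-legacy default set below),
    # a 3-token sequence yields the mask [1, 0, 0, 0, 1] -- ones flag the added special tokens.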
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # SPIECE_UNDERLINE is the module-level "▁" word-boundary marker.
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting.
        In legacy mode: no prefix, suffix=[eos, src_lang_code]; otherwise prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target-language setting.
        In legacy mode: no prefix, suffix=[eos, tgt_lang_code]; otherwise prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
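# A minimal usage sketch (not part of the original file) of the fairseq-offset id layout
# built in `__init__` and of the source-language prefix/suffix handling above.
# "nllb_spm.model" is a hypothetical placeholder path for a SentencePiece model file.
if __name__ == "__main__":
    tokenizer = NllbTokenizer("nllb_spm.model", src_lang="eng_Latn", tgt_lang="fra_Latn")
    # Language codes occupy the ids right after the SentencePiece vocab:
    # id(code_k) = len(sp_model) + k + fairseq_offset, so "ace_Arab" (k = 0) sits at len + 1.
    assert tokenizer.lang_code_to_id["ace_Arab"] == len(tokenizer.sp_model) + 1
    # With legacy_behaviour=False, encoded inputs look like [src_lang_code, *tokens, eos].
    ids = tokenizer("Hello world").input_ids
    assert ids[0] == tokenizer.lang_code_to_id["eng_Latn"]
    assert ids[-1] == tokenizer.eos_token_id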
| 593
|
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(num)))
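
# Hand-checked examples: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27;
# 648 is the known digit sum of 100! (Project Euler problem 20).
assert solution(10) == 27
assert solution(100) == 648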
if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 593
| 1
|
"""Fast tokenization class for the BARThez model, based on a SentencePiece BPE vocabulary."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BARThez tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
                 cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
                         unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,
                         mask_token=mask_token, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """A BARThez sequence has the format `<s> X </s>` (single) or `<s> A </s></s> B </s>` (pair)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """BARThez does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
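    # Example: a pair with len(A) == 2 and len(B) == 3 yields 2 + 3 + 4 == 9 zeros;
    # like BART/CamemBERT, BARThez does not segment inputs with token type ids.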
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
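# A short sketch of the special-token templates implemented above (illustrative, not a test).
# "moussaKam/barthez" is one of the checkpoints mapped in PRETRAINED_VOCAB_FILES_MAP, so
# running this requires network access to download the hosted tokenizer.
if __name__ == "__main__":
    tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
    body = tok.encode("Bonjour le monde", add_special_tokens=False)
    single = tok.build_inputs_with_special_tokens(body)
    # Single sequences follow `<s> X </s>`; pairs follow `<s> A </s></s> B </s>`.
    assert single[0] == tok.cls_token_id and single[-1] == tok.sep_token_id
    pair = tok.build_inputs_with_special_tokens(body, body)
    assert len(pair) == 2 * len(body) + 4  # cls + A + sep + sep + B + sep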
| 702
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of the scheduler's `step` / `step_correct` methods."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Stochastic sampling for variance-expanding (VE) models from Karras et al. (2022),
    "Elucidating the Design Space of Diffusion-Based Generative Models"
    (https://arxiv.org/abs/2206.00364); see Algorithm 2 and the VE column of Table 1.
    """

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007,
                 s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        # All arguments are stored on `self.config` by the `register_to_config` decorator.
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        """Set the discrete timesteps and the geometric sigma schedule used for the diffusion chain."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )
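    # Note: the entries of `schedule` interpolate geometrically from sigma_min**2 at index 0
    # up to sigma_max**2 at index num_inference_steps - 1; iterating over `timesteps` walks
    # that schedule from the noisiest entry down to the cleanest.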
    def add_noise_to_input(
        self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray
    ) -> Tuple[jnp.ndarray, float]:
        """Explicit Langevin-like "churn": add noise to move from sigma to a higher sigma_hat."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float,
             sample_hat: jnp.ndarray, return_dict: bool = True) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Euler step of Algorithm 2: propagate the sample from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(
        self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float,
        sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Second-order correction of Algorithm 2: average the Euler derivative with a re-evaluated one."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
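# A minimal sketch of the sampling loop these methods are written for (Algorithm 2 of
# Karras et al., 2022). `denoiser` is a hypothetical stand-in for a trained model whose
# call signature is assumed to be denoiser(sample, sigma); the shape and step count are
# illustrative assumptions, not library API.
def karras_ve_sample(scheduler, denoiser, rng, shape=(1, 32, 32, 3), num_steps=50):
    state = scheduler.set_timesteps(scheduler.create_state(), num_steps)
    rng, key = random.split(rng)
    sample = random.normal(key, shape) * scheduler.config.sigma_max  # start from pure noise
    for t in state.timesteps:
        sigma = state.schedule[t]
        sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
        rng, key = random.split(rng)
        # Churn step: temporarily raise the noise level from sigma to sigma_hat.
        sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
        out = scheduler.step(state, denoiser(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
        sample = out.prev_sample
        if sigma_prev != 0:
            # Second-order correction: re-evaluate the model at the Euler estimate.
            out = scheduler.step_correct(state, denoiser(sample, sigma_prev), sigma_hat, sigma_prev,
                                         sample_hat, sample, out.derivative)
            sample = out.prev_sample
    return sample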
| 586
| 0
|