| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
snake_case_ : Optional[Any] = deepcopy(lowercase__ )
elif os.path.exists(lowercase__ ):
with io.open(lowercase__ , """r""" , encoding="""utf-8""" ) as f:
snake_case_ : Optional[Any] = json.load(lowercase__ )
else:
try:
snake_case_ : Optional[int] = baseaa.urlsafe_baadecode(lowercase__ ).decode("""utf-8""" )
snake_case_ : Tuple = json.loads(lowercase__ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' )
snake_case_ : int = config
self.set_stage_and_offload()
    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)
    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)
    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)
    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)
    def is_zero2(self):
        return self._stage == 2
    def is_zero3(self):
        return self._stage == 3
    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    """Thin wrapper whose `backward` also runs the DeepSpeed engine step."""
    def __init__(self, engine):
        self.engine = engine
    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Optimizer wrapper whose `step`/`zero_grad` are no-ops: DeepSpeed handles both inside `engine.step`."""
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")
    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Scheduler wrapper whose `step` is a no-op: DeepSpeed steps the scheduler inside `engine.step`."""
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)
    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    """Placeholder that records the arguments the real DeepSpeed optimizer will be built with."""
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class DummyScheduler:
    """Placeholder that records the arguments the real DeepSpeed scheduler will be built with."""
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
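A minimal usage sketch for the config wrapper above; the `zero_optimization` values are made-up examples, not a recommended configuration:

```python
ds_config = {
    "zero_optimization": {
        "stage": 3,
        "offload_param": {"device": "cpu"},
    }
}

hf_ds_config = HfDeepSpeedConfig(ds_config)
print(hf_ds_config.get_value("zero_optimization.stage"))  # 3
print(hf_ds_config.is_zero3())    # True
print(hf_ds_config.is_offload())  # True, since "cpu" is a valid offload device
```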
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_A : ClassVar[Features] = Features({"""audio""": Audio()})
_A : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_A : str = "audio"
_A : str = "transcription"
def __UpperCamelCase (self , lowercase__ ):
if self.audio_column not in features:
raise ValueError(f'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , lowercase__ ):
raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
snake_case_ : Optional[int] = copy.deepcopy(self )
snake_case_ : Tuple = self.input_schema.copy()
snake_case_ : List[str] = features[self.audio_column]
snake_case_ : Any = input_schema
return task_template
@property
def __UpperCamelCase (self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
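A short usage sketch, assuming the surrounding `datasets` package is installed (so `Audio`, `Features`, and `Value` resolve):

```python
from datasets import Audio, Features, Value

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition()
aligned = template.align_with_features(features)
print(aligned.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}
```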
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ = None ):
if components is None:
snake_case_ : Any = []
snake_case_ : Dict = list(lowercase__ )
def __len__(self ):
return len(self.__components )
def __str__(self ):
return "(" + ",".join(map(lowercase__ , self.__components ) ) + ")"
def __add__(self , lowercase__ ):
snake_case_ : Any = len(self )
if size == len(lowercase__ ):
snake_case_ : Union[str, Any] = [self.__components[i] + other.component(lowercase__ ) for i in range(lowercase__ )]
return Vector(lowercase__ )
else:
raise Exception("""must have the same size""" )
def __sub__(self , lowercase__ ):
snake_case_ : Optional[Any] = len(self )
if size == len(lowercase__ ):
snake_case_ : str = [self.__components[i] - other.component(lowercase__ ) for i in range(lowercase__ )]
return Vector(lowercase__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__(self , lowercase__ ):
...
@overload
def __mul__(self , lowercase__ ):
...
def __mul__(self , lowercase__ ):
if isinstance(lowercase__ , (float, int) ):
snake_case_ : str = [c * other for c in self.__components]
return Vector(lowercase__ )
elif isinstance(lowercase__ , lowercase__ ) and len(self ) == len(lowercase__ ):
snake_case_ : str = len(self )
snake_case_ : List[Any] = [self.__components[i] * other.component(lowercase__ ) for i in range(lowercase__ )]
return sum(lowercase__ )
else: # error case
raise Exception("""invalid operand!""" )
def __UpperCamelCase (self ):
return Vector(self.__components )
def __UpperCamelCase (self , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
assert -len(self.__components ) <= pos < len(self.__components )
snake_case_ : Any = value
def __UpperCamelCase (self ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
snake_case_ : int = [c**2 for c in self.__components]
return math.sqrt(sum(lowercase__ ) )
def __UpperCamelCase (self , lowercase__ , lowercase__ = False ):
snake_case_ : str = self * other
snake_case_ : Dict = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
snake_case_ : int = [0] * dimension
snake_case_ : Tuple = 1
return Vector(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
"""simple docstring"""
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
random.seed(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = [random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class Matrix:
    """A matrix with explicit width and height, stored as a list of rows."""
    def __init__(self, matrix, w, h):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self):
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    def __sub__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...
    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...
    def __mul__(self, other):
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self):
        return self.__height
    def width(self):
        return self.__width
    def component(self, x, y):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")
    def change_component(self, x, y, value):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x, y):
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x, y):
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")
    def determinant(self):
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Returns an n x n zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Returns a matrix with random integer entries in [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
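A quick sanity check of the classes above:

```python
v = Vector([1, 2, 3])
w = Vector([4, 5, 6])
print(v + w)                 # (5,7,9)
print(v * w)                 # 32, the dot product
print(v.euclidean_length())  # ~3.7417

m = Matrix([[1, 2], [3, 4]], 2, 2)
print(m.determinant())       # -2
print(m * Vector([1, 1]))    # (3,7), matrix-vector product
```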
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    """Resizes (optionally by a crop fraction), center-crops, rescales, and normalizes images."""
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, crop_pct=0.9, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, crop_pct=None, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, crop_pct=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
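A hypothetical usage sketch; the blank test image is a stand-in, any RGB `PIL.Image` works:

```python
from PIL import Image

processor = PoolFormerImageProcessor()
image = Image.new("RGB", (640, 480))  # stand-in for a real photo
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the defaults above
```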
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    """Tool that translates text between the NLLB-supported languages listed above."""
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )
    def forward(self, inputs):
        return self.model.generate(**inputs)
    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
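An illustrative call, assuming the `transformers` tools runtime is installed; instantiating the tool downloads the NLLB checkpoint on first use:

```python
translator = TranslationTool()
translated = translator("Hello, how are you?", src_lang="English", tgt_lang="French")
print(translated)
```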
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """MBart fast tokenizer: appends a language-code token after </s>."""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang):
        """Reset special tokens to: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, lang):
        """Reset special tokens to: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
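A short usage sketch (downloads the pretrained tokenizer from the Hub):

```python
tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
inputs = tokenizer("UN Chief Says There Is No Plan to Stop War in Syria", return_tensors="pt")
print(inputs["input_ids"])  # ends with </s> followed by the en_XX language code
```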
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 5_0 ):
"""simple docstring"""
snake_case_ : Optional[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
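A hand-checkable case: a row of length 3 admits two tilings that use a red (length-2) tile and one that uses a green (length-3) tile, so the total is 3.

```python
assert solution(3) == 3
```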
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ ):
snake_case_ : Union[str, Any] = data
snake_case_ : List[str] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0]
@staticmethod
def __UpperCamelCase (lowercase__ , lowercase__ ):
return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f
def __UpperCamelCase (self ):
snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def __UpperCamelCase (self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64
for i in range(16 , 80 ):
snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.padding()
snake_case_ : Any = self.split_blocks()
for block in self.blocks:
snake_case_ : Any = self.expand_block(lowercase__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
snake_case_ : Optional[Any] = (b & c) | ((~b) & d)
snake_case_ : List[str] = 0X5_a_8_2_7_9_9_9
elif 20 <= i < 40:
snake_case_ : Union[str, Any] = b ^ c ^ d
snake_case_ : Tuple = 0X6_e_d_9_e_b_a_1
elif 40 <= i < 60:
snake_case_ : str = (b & c) | (b & d) | (c & d)
snake_case_ : List[str] = 0X8_f_1_b_b_c_d_c
elif 60 <= i < 80:
snake_case_ : Tuple = b ^ c ^ d
snake_case_ : str = 0Xc_a_6_2_c_1_d_6
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = (
self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f,
a,
self.rotate(lowercase__ , 30 ),
c,
d,
)
snake_case_ : Any = (
self.h[0] + a & 0Xf_f_f_f_f_f_f_f,
self.h[1] + b & 0Xf_f_f_f_f_f_f_f,
self.h[2] + c & 0Xf_f_f_f_f_f_f_f,
self.h[3] + d & 0Xf_f_f_f_f_f_f_f,
self.h[4] + e & 0Xf_f_f_f_f_f_f_f,
)
return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
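Verifying the implementation above against `hashlib` on a classic test vector:

```python
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert SHA1Hash(message).final_hash() == hashlib.sha1(message).hexdigest()
print(SHA1Hash(message).final_hash())  # 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12
```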
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , *lowercase__ , **lowercase__ ):
super().__init__(*lowercase__ , **lowercase__ )
snake_case_ : Union[str, Any] = {}
def __UpperCamelCase (self , lowercase__ , *lowercase__ , **lowercase__ ):
snake_case_ : Optional[int] = super().add_tokens(lowercase__ , *lowercase__ , **lowercase__ )
if num_added_tokens == 0:
raise ValueError(
f'The tokenizer already contains the token {placeholder_token}. Please pass a different'
""" `placeholder_token` that is not already in the tokenizer.""" )
def __UpperCamelCase (self , lowercase__ , *lowercase__ , lowercase__=1 , **lowercase__ ):
snake_case_ : Optional[int] = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowercase__ , *lowercase__ , **lowercase__ )
output.append(lowercase__ )
else:
snake_case_ : Dict = []
for i in range(lowercase__ ):
snake_case_ : Union[str, Any] = placeholder_token + f'_{i}'
self.try_adding_tokens(lowercase__ , *lowercase__ , **lowercase__ )
output.append(lowercase__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f'The tokenizer already has placeholder token {token} that can get confused with'
f' {placeholder_token}keep placeholder tokens independent' )
snake_case_ : str = output
def __UpperCamelCase (self , lowercase__ , lowercase__=False , lowercase__=1.0 ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : Dict = []
for i in range(len(lowercase__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
snake_case_ : str = self.token_map[placeholder_token]
snake_case_ : str = tokens[: 1 + int(len(lowercase__ ) * prop_tokens_to_load )]
if vector_shuffle:
snake_case_ : Optional[Any] = copy.copy(lowercase__ )
random.shuffle(lowercase__ )
snake_case_ : Optional[int] = text.replace(lowercase__ , """ """.join(lowercase__ ) )
return text
def __call__(self , lowercase__ , *lowercase__ , lowercase__=False , lowercase__=1.0 , **lowercase__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowercase__ , vector_shuffle=lowercase__ , prop_tokens_to_load=lowercase__ ) , *lowercase__ , **lowercase__ , )
def __UpperCamelCase (self , lowercase__ , *lowercase__ , lowercase__=False , lowercase__=1.0 , **lowercase__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
lowercase__ , vector_shuffle=lowercase__ , prop_tokens_to_load=lowercase__ ) , *lowercase__ , **lowercase__ , )
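A hypothetical usage sketch, assuming a CLIP checkpoint is reachable on the Hub; the placeholder token name is made up:

```python
tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
# "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before encoding
ids = tokenizer("a photo of <cat-toy>")["input_ids"]
```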
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
if start < end:
snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = a[end]
snake_case_ : Dict = a[pivot]
snake_case_ : Any = temp
snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ )
return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 100 # 100 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution '''
    '''is :'''
)
print(z)
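# Illustrative sanity check (the defs above carry placeholder names, but the
# call sites show the intended ones; assumes the de-obfuscated implementation):
# >>> arr = [3, 1, 2]
# >>> _in_place_quick_sort(arr, 0, len(arr) - 1) >= 2  # returns the comparison count
# True
# >>> arr
# [1, 2, 3]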
| 48
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConditionalDetrFeatureExtractor''']
a_ = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 48
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ):
"""simple docstring"""
snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )}
    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(SCREAMING_SNAKE_CASE__ )
    # if probability is less than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is lower than the given probability
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE__ )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE__ )
return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return {
i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ )
}
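# Illustrative checks (the first function's original name is not shown above, so
# `random_graph` is an assumption; `complete_graph` appears in the code itself):
# >>> random_graph(4, 1.0) == complete_graph(4)  # probability >= 1 gives the complete graph
# True
# >>> random_graph(4, 0.0)  # probability <= 0 gives an edgeless graph
# {0: [], 1: [], 2: [], 3: []}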
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowercase ( metaclass=_UpperCAmelCase):
"""simple docstring"""
_A : Tuple = ["""onnx"""]
def __init__(self , *lowercase__ , **lowercase__ ):
requires_backends(self , ["""onnx"""] )
@classmethod
def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ):
requires_backends(cls , ["""onnx"""] )
@classmethod
def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ):
requires_backends(cls , ["""onnx"""] )
| 48
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """dpr"""
def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : int = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : List[str] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : Union[str, Any] = projection_dim
snake_case_ : str = position_embedding_type
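# Minimal usage sketch (illustrative):
# >>> from transformers import DPRConfig, DPRContextEncoder
# >>> config = DPRConfig(projection_dim=128)
# >>> model = DPRContextEncoder(config)  # randomly initialized encoder with a 128-d projection head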
| 48
| 1
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Union[str, Any] = num - 1
snake_case_ : List[str] = 0
while s % 2 == 0:
snake_case_ : str = s // 2
t += 1
for _ in range(5 ):
snake_case_ : List[Any] = random.randrange(2 , num - 1 )
snake_case_ : Dict = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if v != 1:
snake_case_ : int = 0
while v != (num - 1):
if i == t - 1:
return False
else:
snake_case_ : str = i + 1
snake_case_ : int = (v**2) % num
return True
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if num < 2:
return False
snake_case_ : Dict = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(SCREAMING_SNAKE_CASE__ )
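# Deterministic examples for the fast paths above:
# >>> is_prime_low_num(97)    # found in the low_primes table
# True
# >>> is_prime_low_num(1000)  # even, caught by trial division over low_primes
# False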
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ):
"""simple docstring"""
while True:
snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(SCREAMING_SNAKE_CASE__ ):
return num
if __name__ == "__main__":
a_ = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 48
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a_ = 10
a_ = 256
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS:
return None
snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ )
for token in set(SCREAMING_SNAKE_CASE__ ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0}
class __lowercase :
"""simple docstring"""
def __init__(self , *,
lowercase__ = 0.85 , ):
snake_case_ : Tuple = duplication_jaccard_threshold
snake_case_ : Optional[Any] = NUM_PERM
snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
snake_case_ : List[Any] = defaultdict(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : int = self._index.query(lowercase__ )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(lowercase__ , lowercase__ )
if len(lowercase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = []
for base, duplicates in self._duplicate_clusters.items():
snake_case_ : Optional[Any] = [base] + list(lowercase__ )
# reformat the cluster to be a list of dict
snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowercase__ )
return duplicate_clusters
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.get_duplicate_clusters()
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ , snake_case_ : str = element
snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ):
"""simple docstring"""
snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ):
di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
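# Deterministic example, using the names visible in the calls above:
# >>> jaccard_similarity("a b c", "a b d")
# 0.5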
a_ = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
for elementa in cluster:
snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
snake_case_ : Union[str, Any] = 1
extremes.append(SCREAMING_SNAKE_CASE__ )
return extremes
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
global _shared_dataset
snake_case_ : str = dataset
snake_case_ : int = []
snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ):
extremes_list.append(SCREAMING_SNAKE_CASE__ )
return extremes_list
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ):
"""simple docstring"""
snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
snake_case_ : str = {}
snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extremes in extremes_clusters:
for element in extremes:
snake_case_ : int = element
snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
snake_case_ : List[Any] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
return ds_filter, duplicate_clusters
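# Minimal usage sketch (illustrative; `deduplicate_dataset` is an assumed name
# for the function above, and the dataset needs "content", "repo_name" and
# "path" columns):
# >>> from datasets import load_dataset
# >>> ds = load_dataset("codeparrot/codeparrot-clean", split="train[:1000]")
# >>> ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)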
| 48
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = ["""image_processor""", """tokenizer"""]
_A : str = """ChineseCLIPImageProcessor"""
_A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ):
snake_case_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase__ , )
snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" )
snake_case_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = self.image_processor
def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if images is not None:
snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if text is not None and images is not None:
snake_case_ : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.tokenizer.model_input_names
snake_case_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCamelCase (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , )
return self.image_processor_class
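# Minimal usage sketch (illustrative; the checkpoint name is an assumption and
# `image` stands for any PIL image):
# >>> from transformers import ChineseCLIPProcessor
# >>> processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# >>> inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
# >>> sorted(inputs.keys())
# ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']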
| 48
|
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
a_ = logging.getLogger(__name__)
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
a_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
a_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
a_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
a_ = [0] * args.vocab_size
for k, v in counter.items():
a_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 48
| 1
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a_ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=None ):
"""simple docstring"""
require_version(deps[pkg] , SCREAMING_SNAKE_CASE__ )
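# Illustrative call (the helper's original name is an assumption):
# >>> dep_version_check("tokenizers")  # raises if the installed version violates the pinned range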
| 48
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path / """cache"""
snake_case_ : Optional[int] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : int = {"""text""": """string"""}
snake_case_ : Any = features.copy() if features else default_expected_features
snake_case_ : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Optional[Any] = {"""text""": """string"""}
snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("""path_type""" , [str, list] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[str] = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : str = [text_path]
snake_case_ : List[str] = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Tuple = tmp_path / """cache"""
    # text files are always read into a single "text" column of string dtype
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : int = features.copy() if features else default_expected_features
snake_case_ : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if split:
snake_case_ : Union[str, Any] = {split: text_path}
else:
snake_case_ : Union[str, Any] = """train"""
snake_case_ : int = {"""train""": text_path, """test""": text_path}
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : Tuple = {"""text""": """string"""}
snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 48
| 1
|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0_2_4 ):
"""simple docstring"""
snake_case_ , snake_case_ : Any = [], []
snake_case_ : Union[str, Any] = list(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case_ , snake_case_ : Union[str, Any] = sorted_examples[0]
def is_too_big(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return tok(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
snake_case_ : Any = new_src + """ """ + src
snake_case_ : str = new_tgt + """ """ + tgt
        if is_too_big(SCREAMING_SNAKE_CASE__ ) or is_too_big(SCREAMING_SNAKE_CASE__ ): # can't fit, finalize example
finished_src.append(SCREAMING_SNAKE_CASE__ )
finished_tgt.append(SCREAMING_SNAKE_CASE__ )
snake_case_ , snake_case_ : int = src, tgt
else: # can fit, keep adding
snake_case_ , snake_case_ : Any = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(SCREAMING_SNAKE_CASE__ )
finished_tgt.append(SCREAMING_SNAKE_CASE__ )
return finished_src, finished_tgt
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
snake_case_ : Any = Path(SCREAMING_SNAKE_CASE__ )
save_path.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
for split in ["train"]:
snake_case_ , snake_case_ : List[str] = data_dir / f'{split}.source', data_dir / f'{split}.target'
snake_case_ : Tuple = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE__ ).open().readlines()]
snake_case_ : int = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE__ ).open().readlines()]
snake_case_ , snake_case_ : Any = pack_examples(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f'packed {split} split from {len(SCREAMING_SNAKE_CASE__ )} examples -> {len(SCREAMING_SNAKE_CASE__ )}.' )
Path(save_path / f'{split}.source' ).open("""w""" ).write("""\n""".join(SCREAMING_SNAKE_CASE__ ) )
Path(save_path / f'{split}.target' ).open("""w""" ).write("""\n""".join(SCREAMING_SNAKE_CASE__ ) )
for split in ["val", "test"]:
snake_case_ , snake_case_ : Union[str, Any] = data_dir / f'{split}.source', data_dir / f'{split}.target'
shutil.copyfile(SCREAMING_SNAKE_CASE__ , save_path / f'{split}.source' )
shutil.copyfile(SCREAMING_SNAKE_CASE__ , save_path / f'{split}.target' )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : str = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" , type=SCREAMING_SNAKE_CASE__ , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""--max_seq_len""" , type=SCREAMING_SNAKE_CASE__ , default=1_2_8 )
parser.add_argument("""--data_dir""" , type=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--save_path""" , type=SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = parser.parse_args()
snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(SCREAMING_SNAKE_CASE__ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
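# Example invocation (illustrative; the script name and paths are assumptions):
# python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#     --data_dir ./cnn_dm --save_path ./cnn_dm_packed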
| 48
|
"""simple docstring"""
from copy import deepcopy
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ = None , lowercase__ = None ):
if arr is None and size is not None:
snake_case_ : str = size
snake_case_ : Optional[Any] = [0] * size
elif arr is not None:
self.init(lowercase__ )
else:
raise ValueError("""Either arr or size must be specified""" )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[Any] = len(lowercase__ )
snake_case_ : int = deepcopy(lowercase__ )
for i in range(1 , self.size ):
snake_case_ : Optional[Any] = self.next_(lowercase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCamelCase (self ):
snake_case_ : Dict = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case_ : Optional[int] = self.next_(lowercase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index + (index & (-index))
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index - (index & (-index))
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case_ : Tuple = self.next_(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
self.add(lowercase__ , value - self.get(lowercase__ ) )
def __UpperCamelCase (self , lowercase__ ):
if right == 0:
return 0
snake_case_ : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case_ : Optional[int] = self.prev(lowercase__ )
return result
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
return self.prefix(lowercase__ ) - self.prefix(lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return self.query(lowercase__ , index + 1 )
def __UpperCamelCase (self , lowercase__ ):
value -= self.tree[0]
if value < 0:
return -1
snake_case_ : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
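# Illustrative usage (assuming the class's original name, FenwickTree;
# query(left, right) sums over the half-open index range [left, right)):
# >>> f = FenwickTree([1, 2, 3, 4])
# >>> f.prefix(3)
# 6
# >>> f.query(1, 3)
# 5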
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
| 1
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a_ = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__=-1 ):
# in NER datasets, the last column is usually reserved for NER label
snake_case_ : Union[str, Any] = label_idx
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[str] = mode.value
snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : Any = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
snake_case_ : str = []
snake_case_ : List[Any] = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
snake_case_ : Optional[Any] = []
snake_case_ : int = []
else:
snake_case_ : Optional[Any] = line.split(""" """ )
words.append(splits[0] )
if len(lowercase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : str = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(lowercase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(lowercase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Dict = f.read().splitlines()
if "O" not in labels:
snake_case_ : List[Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Any = f.read().splitlines()
if "O" not in labels:
snake_case_ : Tuple = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[Any] = mode.value
snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : str = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(lowercase__ ):
snake_case_ : Tuple = []
snake_case_ : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(lowercase__ ) == len(lowercase__ )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = 0
for sentence in parse_incr(lowercase__ ):
snake_case_ : int = preds_list[example_id]
snake_case_ : Dict = """"""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowercase__ )
example_id += 1
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 48
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ):
"""simple docstring"""
snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
for i in range(1 , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Tuple = collection[i]
snake_case_ : Tuple = 0
snake_case_ : str = i - 1
while low <= high:
snake_case_ : Optional[int] = (low + high) // 2
if val < collection[mid]:
snake_case_ : List[str] = mid - 1
else:
snake_case_ : str = mid + 1
for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ):
snake_case_ : List[str] = collection[j - 1]
snake_case_ : Any = val
return collection
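# Examples (the call under __main__ below shows the intended function name):
# >>> binary_insertion_sort([5, 2, 4, 1])
# [1, 2, 4, 5]
# >>> binary_insertion_sort([])
# []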
if __name__ == "__main__":
a_ = input('''Enter numbers separated by a comma:\n''').strip()
a_ = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 48
| 1
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
a_ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
a_ = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'config.{attribute}' in modeling_source
or f'getattr(config, "{attribute}"' in modeling_source
or f'getattr(self.config, "{attribute}"' in modeling_source
):
snake_case_ : Dict = True
# Deal with multi-line cases
elif (
re.search(
Rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , SCREAMING_SNAKE_CASE__ , )
is not None
):
snake_case_ : Tuple = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
snake_case_ : Tuple = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
snake_case_ : int = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
snake_case_ : str = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
snake_case_ : str = True
if not attribute_used:
snake_case_ : Dict = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
snake_case_ : List[str] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
snake_case_ : Optional[Any] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
snake_case_ : Optional[int] = True
elif attribute.endswith("""_token_id""" ):
snake_case_ : Tuple = True
# configuration class specific cases
if not case_allowed:
snake_case_ : List[str] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
snake_case_ : Optional[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
snake_case_ : List[str] = dict(inspect.signature(config_class.__init__ ).parameters )
snake_case_ : List[Any] = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
snake_case_ : List[str] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
snake_case_ : Optional[Any] = {}
if len(config_class.attribute_map ) > 0:
snake_case_ : Optional[int] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
snake_case_ : str = inspect.getsourcefile(SCREAMING_SNAKE_CASE__ )
snake_case_ : Union[str, Any] = os.path.dirname(SCREAMING_SNAKE_CASE__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
snake_case_ : List[str] = [os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for fn in os.listdir(SCREAMING_SNAKE_CASE__ ) if fn.startswith("""modeling_""" )]
# Get the source code strings
snake_case_ : Union[str, Any] = []
for path in modeling_paths:
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ ) as fp:
modeling_sources.append(fp.read() )
snake_case_ : Any = []
for config_param, default_value in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# `attributes` here is all the variant names for `config_param`
snake_case_ : int = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
unused_attributes.append(attributes[0] )
return sorted(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Optional[int] = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
snake_case_ : str = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda SCREAMING_SNAKE_CASE__ : inspect.isclass(SCREAMING_SNAKE_CASE__ )
and issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and inspect.getmodule(SCREAMING_SNAKE_CASE__ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
snake_case_ : int = check_config_attributes_being_used(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
snake_case_ : Optional[Any] = unused_attributes
if len(SCREAMING_SNAKE_CASE__ ) > 0:
snake_case_ : Optional[int] = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f'{name}: {attributes}\n'
raise ValueError(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
check_config_attributes()
| 48
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = ["""image_processor""", """tokenizer"""]
_A : str = """ChineseCLIPImageProcessor"""
_A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ):
snake_case_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase__ , )
snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" )
snake_case_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = self.image_processor
def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if images is not None:
snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if text is not None and images is not None:
snake_case_ : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.tokenizer.model_input_names
snake_case_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCamelCase (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , )
return self.image_processor_class
| 48
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
a_ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[Any] = VOCAB_FILES_NAMES
_A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : List[Any] = ["""input_ids""", """attention_mask"""]
_A : List[str] = BartTokenizer
def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="replace" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=False , lowercase__=True , **lowercase__ , ):
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ , **lowercase__ , )
snake_case_ : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase__ ) != add_prefix_space:
snake_case_ : List[str] = getattr(lowercase__ , pre_tok_state.pop("""type""" ) )
snake_case_ : int = add_prefix_space
snake_case_ : List[Any] = pre_tok_class(**lowercase__ )
snake_case_ : List[str] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ : int = """post_processor"""
snake_case_ : Union[str, Any] = getattr(self.backend_tokenizer , lowercase__ , lowercase__ )
if tokenizer_component_instance:
snake_case_ : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ : int = tuple(state["""sep"""] )
if "cls" in state:
snake_case_ : List[Any] = tuple(state["""cls"""] )
snake_case_ : str = False
if state.get("""add_prefix_space""" , lowercase__ ) != add_prefix_space:
snake_case_ : Optional[int] = add_prefix_space
snake_case_ : int = True
if state.get("""trim_offsets""" , lowercase__ ) != trim_offsets:
snake_case_ : int = trim_offsets
snake_case_ : int = True
if changes_to_apply:
snake_case_ : int = getattr(lowercase__ , state.pop("""type""" ) )
snake_case_ : List[str] = component_class(**lowercase__ )
setattr(self.backend_tokenizer , lowercase__ , lowercase__ )
@property
def __UpperCamelCase (self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else value
snake_case_ : Tuple = value
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
snake_case_ : Union[str, Any] = kwargs.get("""is_split_into_words""" , lowercase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
snake_case_ : Dict = kwargs.get("""is_split_into_words""" , lowercase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : Optional[Any] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__=None ):
snake_case_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : Optional[Any] = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
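# Minimal usage sketch (illustrative; BartTokenizerFast is the intended name of
# the class above):
# >>> tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
# >>> ids = tokenizer("Hello world")["input_ids"]
# >>> ids[0] == tokenizer.bos_token_id and ids[-1] == tokenizer.eos_token_id
# True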
| 48
|
"""simple docstring"""
import argparse
import copy
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : List[Any] = {}
with open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case_ : int = []
_list.append([line.split()[1], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case_ : str = []
_list.append([line.split()[0], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
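# Illustrative sketch of the assumed input format: each line of the data file is an edge
# "node_a node_b weight", e.g. "a b 20", which is recorded under both endpoints:
#   dict_of_neighbours["a"] -> [["b", "20"], ...] and dict_of_neighbours["b"] -> [["a", "20"], ...]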
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ ) as f:
snake_case_ : Optional[Any] = f.read(1 )
snake_case_ : Union[str, Any] = start_node
snake_case_ : Dict = []
snake_case_ : Union[str, Any] = start_node
snake_case_ : Tuple = 0
while visiting not in first_solution:
snake_case_ : int = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution:
snake_case_ : Union[str, Any] = k[1]
snake_case_ : Any = k[0]
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = best_node
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case_ : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = []
for n in solution[1:-1]:
snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ )
for kn in solution[1:-1]:
snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ )
if n == kn:
continue
snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
snake_case_ : int = kn
snake_case_ : Dict = n
snake_case_ : Optional[int] = 0
for k in _tmp[:-1]:
snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case_ : Dict = distance + int(i[1] )
_tmp.append(SCREAMING_SNAKE_CASE__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case_ : Optional[Any] = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
snake_case_ : Dict = 1
snake_case_ : List[Any] = first_solution
snake_case_ : List[Any] = []
snake_case_ : Optional[Any] = distance_of_first_solution
snake_case_ : Dict = solution
while count <= iters:
snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = 0
snake_case_ : List[Any] = neighborhood[index_of_best_solution]
snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1
snake_case_ : List[str] = False
while not found:
snake_case_ : Tuple = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
if best_solution[i] != solution[i]:
snake_case_ : Optional[Any] = best_solution[i]
snake_case_ : int = solution[i]
break
snake_case_ : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case_ : Tuple = True
snake_case_ : Dict = best_solution[:-1]
snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case_ : Tuple = cost
snake_case_ : Union[str, Any] = solution
else:
snake_case_ : str = index_of_best_solution + 1
snake_case_ : Tuple = neighborhood[index_of_best_solution]
if len(SCREAMING_SNAKE_CASE__ ) >= size:
tabu_list.pop(0 )
snake_case_ : List[str] = count + 1
return best_solution_ever, best_cost
def SCREAMING_SNAKE_CASE__ ( args=None ):
    """simple docstring"""
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(description='''Tabu Search''')
    a_.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
    a_.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
    a_.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
    main(a_.parse_args())
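# Example invocation (script and data file names are hypothetical):
#   python tabu_search.py -f tabudata.txt -i 100 -s 5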
| 48
| 1
|
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : np.ndarray ):
"""simple docstring"""
return input_array.reshape((input_array.size, 1) )
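# e.g. column_reshape(np.array([1, 2, 3])) -> array([[1], [2], [3]]), shape (3, 1)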
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Optional[int] = np.nan
for i in range(SCREAMING_SNAKE_CASE__ ):
snake_case_ : Optional[int] = features[:, labels == i]
snake_case_ : Tuple = data.mean(1 )
# Centralize the data of class i
snake_case_ : List[Any] = data - column_reshape(SCREAMING_SNAKE_CASE__ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(SCREAMING_SNAKE_CASE__ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
snake_case_ : Optional[int] = np.dot(SCREAMING_SNAKE_CASE__ , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Union[str, Any] = features.mean(1 )
snake_case_ : Union[str, Any] = np.nan
for i in range(SCREAMING_SNAKE_CASE__ ):
snake_case_ : Dict = features[:, labels == i]
snake_case_ : Union[str, Any] = data.shape[1]
snake_case_ : List[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ ) , (column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
snake_case_ : Tuple = device_data * np.dot(
column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ ) , (column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if features.any():
snake_case_ : Tuple = features.mean(1 )
# Center the dataset
snake_case_ : Any = features - np.reshape(SCREAMING_SNAKE_CASE__ , (data_mean.size, 1) )
snake_case_ : Optional[int] = np.dot(SCREAMING_SNAKE_CASE__ , centered_data.T ) / features.shape[1]
snake_case_ , snake_case_ : Dict = np.linalg.eigh(SCREAMING_SNAKE_CASE__ )
# Take all the columns in the reverse order (-1), and then takes only the first
snake_case_ : Optional[int] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
snake_case_ : int = np.dot(filtered_eigenvectors.T , SCREAMING_SNAKE_CASE__ )
logging.info("""Principal Component Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=SCREAMING_SNAKE_CASE__ )
logging.error("""Dataset empty""" )
raise AssertionError
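# Usage sketch (function name as used by the tests below): `features` is laid out as
# (n_features, n_samples), so projecting a (3, 5) dataset onto its 2 leading principal
# components yields an array of shape (2, 5):
#   features = np.array([[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 3.0, 4.0, 5.0, 6.0], [3.0, 4.0, 5.0, 6.0, 7.0]])
#   principal_component_analysis(features, 2)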
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
snake_case_ , snake_case_ : Union[str, Any] = eigh(
covariance_between_classes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , covariance_within_classes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
snake_case_ : Tuple = eigenvectors[:, ::-1][:, :dimensions]
snake_case_ , snake_case_ , snake_case_ : int = np.linalg.svd(SCREAMING_SNAKE_CASE__ )
snake_case_ : Any = svd_matrix[:, 0:dimensions]
snake_case_ : Dict = np.dot(filtered_svd_matrix.T , SCREAMING_SNAKE_CASE__ )
logging.info("""Linear Discriminant Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=SCREAMING_SNAKE_CASE__ )
logging.error("""Dataset empty""" )
raise AssertionError
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : int = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
snake_case_ : Any = np.array([0, 0, 0, 1, 1] )
snake_case_ : Dict = 2
snake_case_ : Dict = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(SCREAMING_SNAKE_CASE__ ) as error_info:
snake_case_ : int = linear_discriminant_analysis(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
raise AssertionError(
"""Did not raise AssertionError for dimensions > classes""" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Any = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
snake_case_ : Any = 2
snake_case_ : List[Any] = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] )
with pytest.raises(SCREAMING_SNAKE_CASE__ ) as error_info:
snake_case_ : str = principal_component_analysis(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
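# Minimal construction sketch (public names assumed from the upstream library, not defined in this file):
#   from transformers import RagConfig, DPRConfig, BartConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
#   )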
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """rag"""
_A : Optional[Any] = True
def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
snake_case_ : List[str] = kwargs.pop("""generator""" )
snake_case_ : List[str] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : int = reduce_loss
snake_case_ : Optional[int] = label_smoothing
snake_case_ : Dict = exclude_bos_score
snake_case_ : Union[str, Any] = do_marginalize
snake_case_ : Union[str, Any] = title_sep
snake_case_ : int = doc_sep
snake_case_ : int = n_docs
snake_case_ : List[str] = max_combined_length
snake_case_ : Tuple = dataset
snake_case_ : int = dataset_split
snake_case_ : str = index_name
snake_case_ : List[str] = retrieval_vector_size
snake_case_ : Dict = retrieval_batch_size
snake_case_ : str = passages_path
snake_case_ : Union[str, Any] = index_path
snake_case_ : Tuple = use_dummy_dataset
snake_case_ : Dict = output_retrieved
snake_case_ : str = do_deduplication
snake_case_ : Any = use_cache
if self.forced_eos_token_id is None:
snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )
@classmethod
def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.question_encoder.to_dict()
snake_case_ : Dict = self.generator.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
| 48
| 1
|
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
@add_end_docstrings(
_UpperCAmelCase , R"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self , lowercase__ ):
if self.framework == "tf":
snake_case_ : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
snake_case_ : str = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowercase__ )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[int] = self.get_masked_index(lowercase__ )
snake_case_ : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __UpperCamelCase (self , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__=None , **lowercase__ ):
if return_tensors is None:
snake_case_ : str = self.framework
snake_case_ : Dict = self.tokenizer(lowercase__ , return_tensors=lowercase__ )
self.ensure_exactly_one_mask_token(lowercase__ )
return model_inputs
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = self.model(**lowercase__ )
snake_case_ : Optional[Any] = model_inputs["""input_ids"""]
return model_outputs
def __UpperCamelCase (self , lowercase__ , lowercase__=5 , lowercase__=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
snake_case_ : List[str] = target_ids.shape[0]
snake_case_ : Union[str, Any] = model_outputs["""input_ids"""][0]
snake_case_ : str = model_outputs["""logits"""]
if self.framework == "tf":
snake_case_ : Optional[int] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
snake_case_ : Union[str, Any] = outputs.numpy()
snake_case_ : Dict = outputs[0, masked_index, :]
snake_case_ : Optional[Any] = stable_softmax(lowercase__ , axis=-1 )
if target_ids is not None:
snake_case_ : Tuple = tf.gather_nd(tf.squeeze(lowercase__ , 0 ) , target_ids.reshape(-1 , 1 ) )
snake_case_ : List[str] = tf.expand_dims(lowercase__ , 0 )
snake_case_ : str = tf.math.top_k(lowercase__ , k=lowercase__ )
snake_case_ , snake_case_ : Any = topk.values.numpy(), topk.indices.numpy()
else:
snake_case_ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowercase__ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
snake_case_ : List[Any] = outputs[0, masked_index, :]
snake_case_ : Any = logits.softmax(dim=-1 )
if target_ids is not None:
snake_case_ : int = probs[..., target_ids]
snake_case_ , snake_case_ : Dict = probs.topk(lowercase__ )
snake_case_ : Tuple = []
snake_case_ : Union[str, Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
snake_case_ : int = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
snake_case_ : Any = input_ids.numpy().copy()
if target_ids is not None:
snake_case_ : int = target_ids[p].tolist()
snake_case_ : Union[str, Any] = p
# Filter padding out:
snake_case_ : int = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
snake_case_ : Dict = self.tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
snake_case_ : List[str] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(lowercase__ )
result.append(lowercase__ )
if single_mask:
return result[0]
return result
def __UpperCamelCase (self , lowercase__ , lowercase__=None ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : Union[str, Any] = [targets]
try:
snake_case_ : Union[str, Any] = self.tokenizer.get_vocab()
except Exception:
snake_case_ : Any = {}
snake_case_ : Optional[int] = []
for target in targets:
snake_case_ : Optional[Any] = vocab.get(lowercase__ , lowercase__ )
if id_ is None:
snake_case_ : int = self.tokenizer(
lowercase__ , add_special_tokens=lowercase__ , return_attention_mask=lowercase__ , return_token_type_ids=lowercase__ , max_length=1 , truncation=lowercase__ , )["""input_ids"""]
if len(lowercase__ ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
snake_case_ : int = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
snake_case_ : str = list(set(lowercase__ ) )
if len(lowercase__ ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
snake_case_ : List[str] = np.array(lowercase__ )
return target_ids
def __UpperCamelCase (self , lowercase__=None , lowercase__=None ):
snake_case_ : List[Any] = {}
if targets is not None:
snake_case_ : Union[str, Any] = self.get_target_ids(lowercase__ , lowercase__ )
snake_case_ : Optional[Any] = target_ids
if top_k is not None:
snake_case_ : List[str] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__(self , lowercase__ , *lowercase__ , **lowercase__ ):
snake_case_ : Tuple = super().__call__(lowercase__ , **lowercase__ )
if isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) == 1:
return outputs[0]
return outputs
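# Illustrative usage sketch (assumes the standard `pipeline` factory and a masked-LM checkpoint):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the capital of [MASK].", top_k=3)  # or pass targets=["france", ...]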
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Tuple = backbone_config.get("""model_type""" )
snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
snake_case_ : List[Any] = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = initializer_range
snake_case_ : str = pool_scales
snake_case_ : Dict = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : List[str] = auxiliary_in_channels
snake_case_ : Optional[Any] = auxiliary_channels
snake_case_ : Any = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : List[str] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : Any = self.__class__.model_type
return output
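# Minimal sketch (upstream this class is published as UperNetConfig; the name is assumed here):
#   config = UperNetConfig()       # no backbone_config given, so a default ResNet backbone is logged
#   serialized = config.to_dict()  # includes the nested backbone_config and this model_type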
| 48
| 1
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
a_ = logging.get_logger(__name__)
a_ = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
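# e.g. a_["linear"](optimizer, num_warmup_steps=500, num_training_steps=10_000) builds a
# LambdaLR schedule that warms up for 500 steps and then decays linearly to zero.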
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__=None , lowercase__=None , *lowercase__ , **lowercase__ ):
super().__init__(*lowercase__ , **lowercase__ )
if config is None:
assert isinstance(self.model , lowercase__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f' {self.model.__class__}'
)
snake_case_ : Optional[int] = self.model.config
else:
snake_case_ : Optional[int] = config
snake_case_ : Any = data_args
snake_case_ : Union[str, Any] = self.config.tgt_vocab_size if isinstance(self.config , lowercase__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
""" padding..""" )
if self.args.label_smoothing == 0:
snake_case_ : Optional[int] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
snake_case_ : Optional[int] = label_smoothed_nll_loss
def __UpperCamelCase (self , lowercase__ ):
if self.optimizer is None:
snake_case_ : str = ["""bias""", """LayerNorm.weight"""]
snake_case_ : str = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
snake_case_ : Tuple = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
snake_case_ : Dict = Adafactor
snake_case_ : List[Any] = {"""scale_parameter""": False, """relative_step""": False}
else:
snake_case_ : Optional[int] = AdamW
snake_case_ : Tuple = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
snake_case_ : int = self.args.learning_rate
if self.sharded_ddp:
snake_case_ : str = OSS(
params=lowercase__ , optim=lowercase__ , **lowercase__ , )
else:
snake_case_ : Any = optimizer_cls(lowercase__ , **lowercase__ )
if self.lr_scheduler is None:
snake_case_ : Optional[Any] = self._get_lr_scheduler(lowercase__ )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : str = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
snake_case_ : Tuple = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
snake_case_ : Union[str, Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
snake_case_ : Optional[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=lowercase__ )
return scheduler
def __UpperCamelCase (self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
snake_case_ : int = model(**lowercase__ , use_cache=lowercase__ )[0]
snake_case_ : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
snake_case_ , snake_case_ : Union[str, Any] = model(**lowercase__ , labels=lowercase__ , use_cache=lowercase__ )[:2]
else:
# compute label smoothed loss
snake_case_ : int = model(**lowercase__ , use_cache=lowercase__ )[0]
snake_case_ : Optional[int] = torch.nn.functional.log_softmax(lowercase__ , dim=-1 )
snake_case_ , snake_case_ : Any = self.loss_fn(lowercase__ , lowercase__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : Tuple = inputs.pop("""labels""" )
snake_case_ , snake_case_ : Union[str, Any] = self._compute_loss(lowercase__ , lowercase__ , lowercase__ )
return loss
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ):
snake_case_ : int = self._prepare_inputs(lowercase__ )
snake_case_ : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
snake_case_ : int = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **lowercase__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
snake_case_ : Optional[Any] = self._pad_tensors_to_max_len(lowercase__ , gen_kwargs["""max_length"""] )
snake_case_ : int = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
snake_case_ , snake_case_ : str = self._compute_loss(lowercase__ , lowercase__ , lowercase__ )
snake_case_ : List[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
snake_case_ : str = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
snake_case_ : Optional[Any] = self._pad_tensors_to_max_len(lowercase__ , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
# If PAD token is not defined at least EOS token has to be defined
snake_case_ : Tuple = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f' padded to `max_length`={max_length}' )
snake_case_ : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
snake_case_ : List[Any] = tensor
return padded_tensor
| 48
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a_ = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__=-1 ):
# in NER datasets, the last column is usually reserved for NER label
snake_case_ : Union[str, Any] = label_idx
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[str] = mode.value
snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : Any = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
snake_case_ : str = []
snake_case_ : List[Any] = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
snake_case_ : Optional[Any] = []
snake_case_ : int = []
else:
snake_case_ : Optional[Any] = line.split(""" """ )
words.append(splits[0] )
if len(lowercase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
return examples
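    # Example of the CoNLL-style input consumed above: one token per line, columns separated by
    # single spaces, the NER tag in the last column (label_idx=-1), and blank lines between sentences:
    #   EU NNP B-NP B-ORG
    #   rejects VBZ B-VP O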
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : str = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(lowercase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(lowercase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Dict = f.read().splitlines()
if "O" not in labels:
snake_case_ : List[Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Any = f.read().splitlines()
if "O" not in labels:
snake_case_ : Tuple = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[Any] = mode.value
snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : str = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(lowercase__ ):
snake_case_ : Tuple = []
snake_case_ : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(lowercase__ ) == len(lowercase__ )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = 0
for sentence in parse_incr(lowercase__ ):
snake_case_ : int = preds_list[example_id]
snake_case_ : Dict = """"""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowercase__ )
example_id += 1
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 48
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def __UpperCamelCase (self ):
snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
snake_case_ : Dict = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
snake_case_ : str = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
snake_case_ : Any = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
snake_case_ : Union[str, Any] = model(lowercase__ )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , lowercase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , lowercase__ , atol=1e-3 ) )
@slow
def __UpperCamelCase (self ):
snake_case_ : List[str] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
snake_case_ : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
snake_case_ : List[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
snake_case_ : List[str] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
snake_case_ : Optional[int] = model(lowercase__ )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , lowercase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , lowercase__ , atol=1e-3 ) )
| 48
|
"""simple docstring"""
import random
def rabin_miller( num : int ):
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
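# e.g. rabin_miller(97): 96 == 2**5 * 3, so s == 3 and t == 5, and five random witnesses
# are tested; the check is probabilistic, not a primality proof.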
def is_prime_low_num( num : int ):
    """simple docstring"""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
        31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
        127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
        179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
        283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
        353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
        419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541,
        547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
        607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
        661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
        739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
        811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
        877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime( keysize : int = 1_0_2_4 ):
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
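# e.g. generate_large_prime(16) returns a random 16-bit prime drawn from [2**15, 2**16)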
if __name__ == "__main__":
a_ = generate_large_prime()
    print(('''Prime number:''', a_))
    print(('''is_prime_low_num:''', is_prime_low_num(a_)))
| 48
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 48
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = """deberta-v2"""
def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Optional[int] = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = relative_attention
snake_case_ : Dict = max_relative_positions
snake_case_ : Optional[int] = pad_token_id
snake_case_ : List[str] = position_biased_input
# Backwards compatibility
if type(lowercase__ ) == str:
snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )]
snake_case_ : Optional[int] = pos_att_type
snake_case_ : List[str] = vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ )
snake_case_ : List[str] = pooler_dropout
snake_case_ : int = pooler_hidden_act
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@property
def __UpperCamelCase (self ):
if self.task == "multiple-choice":
snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : int = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def __UpperCamelCase (self ):
return 12
def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ):
snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
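# Sketch (upstream names DebertaV2Config / DebertaV2OnnxConfig assumed, as this file renames them):
#   onnx_config = DebertaV2OnnxConfig(DebertaV2Config())
#   list(onnx_config.inputs)  # ["input_ids", "attention_mask"], since type_vocab_size defaults to 0 here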
| 48
| 1
|
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase):
"""simple docstring"""
@register_to_config
def __init__(self , lowercase__ , lowercase__ = None , lowercase__ = None ):
super().__init__()
snake_case_ : Tuple = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
snake_case_ : Any = torch.zeros(lowercase__ , lowercase__ )
else:
snake_case_ : Optional[int] = None
snake_case_ : Dict = torch.nn.Parameter(lowercase__ )
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : VQModel
_A : CLIPTextModel
_A : CLIPTokenizer
_A : TransformeraDModel
_A : LearnedClassifierFreeSamplingEmbeddings
_A : VQDiffusionScheduler
def __init__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
vqvae=lowercase__ , transformer=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , scheduler=lowercase__ , learned_classifier_free_sampling_embeddings=lowercase__ , )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = len(lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else 1
# get prompt text embeddings
snake_case_ : Union[str, Any] = self.tokenizer(
lowercase__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case_ : Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
snake_case_ : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case_ : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
snake_case_ : List[str] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowercase__ )
# duplicate text embeddings for each generation per prompt
snake_case_ : Tuple = prompt_embeds.repeat_interleave(lowercase__ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
snake_case_ : Any = self.learned_classifier_free_sampling_embeddings.embeddings
snake_case_ : Union[str, Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(lowercase__ , 1 , 1 )
else:
snake_case_ : Tuple = [""""""] * batch_size
snake_case_ : Dict = text_input_ids.shape[-1]
snake_case_ : Tuple = self.tokenizer(
lowercase__ , padding="""max_length""" , max_length=lowercase__ , truncation=lowercase__ , return_tensors="""pt""" , )
snake_case_ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
snake_case_ : int = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowercase__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ : Optional[Any] = negative_prompt_embeds.shape[1]
snake_case_ : Optional[int] = negative_prompt_embeds.repeat(1 , lowercase__ , 1 )
snake_case_ : int = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : Optional[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__(self , lowercase__ , lowercase__ = 1_00 , lowercase__ = 5.0 , lowercase__ = 1.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , lowercase__ = None , lowercase__ = 1 , ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[Any] = 1
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Optional[int] = len(lowercase__ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(lowercase__ )}' )
snake_case_ : int = batch_size * num_images_per_prompt
snake_case_ : Tuple = guidance_scale > 1.0
snake_case_ : List[Any] = self._encode_prompt(lowercase__ , lowercase__ , lowercase__ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase__ , lowercase__ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(lowercase__ )}.' )
# get the initial completely masked latents unless the user supplied it
snake_case_ : int = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
snake_case_ : Dict = self.transformer.num_vector_embeds - 1
snake_case_ : Optional[int] = torch.full(lowercase__ , lowercase__ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
snake_case_ : Tuple = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowercase__ , device=self.device )
snake_case_ : Optional[Any] = self.scheduler.timesteps.to(self.device )
snake_case_ : List[str] = latents
for i, t in enumerate(self.progress_bar(lowercase__ ) ):
# expand the sample if we are doing classifier free guidance
snake_case_ : Any = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
snake_case_ : int = self.transformer(lowercase__ , encoder_hidden_states=lowercase__ , timestep=lowercase__ ).sample
if do_classifier_free_guidance:
snake_case_ , snake_case_ : Union[str, Any] = model_output.chunk(2 )
snake_case_ : Optional[int] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowercase__ , dim=1 , keepdim=lowercase__ )
snake_case_ : str = self.truncate(lowercase__ , lowercase__ )
# remove `log(0)`'s (`-inf`s)
snake_case_ : Union[str, Any] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : Dict = self.scheduler.step(lowercase__ , timestep=lowercase__ , sample=lowercase__ , generator=lowercase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase__ , lowercase__ , lowercase__ )
snake_case_ : Dict = self.vqvae.config.vq_embed_dim
snake_case_ : Optional[int] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
snake_case_ : List[Any] = self.vqvae.quantize.get_codebook_entry(lowercase__ , shape=lowercase__ )
snake_case_ : Dict = self.vqvae.decode(lowercase__ , force_not_quantize=lowercase__ ).sample
snake_case_ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Tuple = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ , snake_case_ : List[Any] = torch.sort(lowercase__ , 1 , descending=lowercase__ )
snake_case_ : Optional[Any] = torch.exp(lowercase__ )
snake_case_ : int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
snake_case_ : Optional[int] = torch.full_like(keep_mask[:, 0:1, :] , lowercase__ )
snake_case_ : int = torch.cat((all_true, keep_mask) , dim=1 )
snake_case_ : List[Any] = keep_mask[:, :-1, :]
snake_case_ : List[Any] = keep_mask.gather(1 , indices.argsort(1 ) )
snake_case_ : Optional[int] = log_p_x_0.clone()
snake_case_ : Dict = -torch.inf # -inf = log(0)
return rv
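# Illustrative usage sketch (pipeline class name and checkpoint id assumed):
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]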
| 48
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( numa : int , numb : int ):
    """simple docstring"""
    return numa ^ numb < 0
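# (upstream name different_signs assumed) e.g. with (1, -1) the XOR is 1 ^ -1 == -2 < 0, so this
# returns True; with (1, 1) it is 1 ^ 1 == 0, so it returns False. The XOR of two ints is
# negative exactly when their sign bits differ.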
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
| 1
|
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
a_ = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class Version:
    """Dataset version `MAJOR.MINOR.PATCH`, comparable and hashable by its parsed tuple."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        # parse "x.y.z" once so that `tuple`, ordering and hashing below all work
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'{other} (type {type(other)}) cannot be compared to version.')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    """Parse an "x.y.z" version string into a (major, minor, patch) int tuple."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.')
    return tuple(int(v) for v in [res.group("""major"""), res.group("""minor"""), res.group("""patch""")])
def _version_tuple_to_str(version_tuple):
    """Join a (major, minor, patch) tuple back into an "x.y.z" string."""
    return ".".join(str(v) for v in version_tuple)
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
a_ = '''>>zh<<'''
a_ = '''Helsinki-NLP/'''
if is_torch_available():
a_ = '''pt'''
elif is_tf_available():
a_ = '''tf'''
else:
a_ = '''jax'''
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : str = MarianTokenizer
_A : List[str] = False
_A : List[str] = True
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : Any = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase (self , **lowercase__ ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = """</s>"""
snake_case_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def __UpperCamelCase (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
snake_case_ : Tuple = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : List[str] = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __UpperCamelCase (self ):
# fmt: off
snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
snake_case_ : Dict = """Tämä on testi"""
snake_case_ : List[Any] = """This is a test"""
snake_case_ : Optional[int] = [76, 7, 20_47, 2]
snake_case_ : List[str] = [69, 12, 11, 9_40, 2]
snake_case_ : Any = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    """Parse the data file into an adjacency dict mapping each node to a list of [neighbour, distance] pairs."""
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
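# The expected input format (inferred from the parsing above) is one
# whitespace-separated edge per line, "<node> <node> <distance>", e.g.
#
#     a b 20
#     a c 18
#     b c 10
#
# which yields {'a': [['b', '20'], ['c', '18']], 'b': [['a', '20'], ['c', '10']], ...}.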
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour, starting from the node named by the file's first character."""
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate every 2-swap neighbour of `solution`; each neighbour carries its total distance as its last element."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Tabu-search main loop: move to the best non-tabu 2-swap neighbour each iteration, keeping a bounded tabu list."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    """Run tabu search on the data file supplied on the command line."""
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
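# Example invocation (hypothetical data file name):
#     python tabu_search.py -f tabudata.txt -i 100 -s 5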
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    """Task template pairing an audio input column with a string transcription column."""

    task: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()})
    label_schema: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self , features ):
        if self.audio_column not in features:
            raise ValueError(f'Column {self.audio_column} is not present in features.' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["""audio"""] = features[self.audio_column]
        # the dataclass is frozen, so write the new schema through __dict__
        task_template.__dict__["""input_schema"""] = input_schema
        return task_template

    @property
    def column_mapping(self ):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
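# Usage sketch: aligning the template with a dataset's features swaps the generic
# Audio() input schema for the dataset's concrete audio feature (`ds` below is
# assumed to be any datasets.Dataset with an "audio" column).
#
#     task = AutomaticSpeechRecognition(audio_column="audio", transcription_column="transcription")
#     task = task.align_with_features(ds.features)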
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Instantiate a BigBird model from the config, load the TF checkpoint weights, and save a PyTorch model."""
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
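# Example invocation (hypothetical script name and paths):
#     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path bigbird/model.ckpt \
#         --big_bird_config_file bigbird/config.json \
#         --pytorch_dump_path ./bigbird-pytorch \
#         --is_trivia_qa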
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = ["""pixel_values"""]
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : Union[str, Any] = do_resize
snake_case_ : List[str] = size
snake_case_ : str = crop_pct
snake_case_ : str = resample
snake_case_ : Optional[Any] = do_center_crop
snake_case_ : Dict = crop_size
snake_case_ : int = do_rescale
snake_case_ : Optional[int] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
snake_case_ : Dict = int(size["""height"""] / crop_pct )
else:
snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
else:
if "shortest_edge" in size:
snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
elif "height" in size and "width" in size:
snake_case_ : int = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
snake_case_ : int = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ : List[Any] = resample if resample is not None else self.resample
snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : int = image_std if image_std is not None else self.image_std
snake_case_ : List[Any] = size if size is not None else self.size
snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
a_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , *lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ):
super().__init__(*lowercase__ , **lowercase__ )
snake_case_ : List[Any] = eval_examples
snake_case_ : Tuple = post_process_function
snake_case_ : str = quant_trainer_args
snake_case_ : int = 1_28 # default number of calibration samples
def __UpperCamelCase (self , lowercase__=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
snake_case_ : List[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
snake_case_ : str = self._remove_unused_columns(lowercase__ , description="""Calibration""" )
return DataLoader(
lowercase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowercase__ , )
def __UpperCamelCase (self , lowercase__=None ):
snake_case_ : List[Any] = self.train_dataset if calib_dataset is None else calib_dataset
snake_case_ : Optional[Any] = self.get_calib_dataloader(lowercase__ )
snake_case_ : List[str] = self.model
quant_trainer.configure_model(lowercase__ , self.quant_trainer_args , calib=lowercase__ )
model.eval()
quant_trainer.enable_calibration(lowercase__ )
logger.info("""***** Running calibration *****""" )
logger.info(f' Num examples = {self.calib_num}' )
logger.info(f' Batch size = {calib_dataloader.batch_size}' )
for step, inputs in enumerate(lowercase__ ):
# Prediction step
snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self.prediction_step(lowercase__ , lowercase__ , prediction_loss_only=lowercase__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(lowercase__ , self.quant_trainer_args )
snake_case_ : int = model
def __UpperCamelCase (self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__ = "eval" ):
snake_case_ : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case_ : Any = self.get_eval_dataloader(lowercase__ )
snake_case_ : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case_ : Tuple = self.compute_metrics
snake_case_ : Union[str, Any] = None
snake_case_ : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case_ : List[Any] = eval_loop(
lowercase__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , )
finally:
snake_case_ : Optional[int] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
snake_case_ : Dict = self.post_process_function(lowercase__ , lowercase__ , output.predictions )
snake_case_ : int = self.compute_metrics(lowercase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
snake_case_ : int = metrics.pop(lowercase__ )
self.log(lowercase__ )
else:
snake_case_ : Union[str, Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case_ : Union[str, Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase__ )
return metrics
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__=None , lowercase__ = "test" ):
snake_case_ : str = self.get_test_dataloader(lowercase__ )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case_ : Optional[Any] = self.compute_metrics
snake_case_ : Any = None
snake_case_ : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case_ : Any = eval_loop(
lowercase__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , )
finally:
snake_case_ : Optional[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case_ : Optional[Any] = self.post_process_function(lowercase__ , lowercase__ , output.predictions , """predict""" )
snake_case_ : Optional[int] = self.compute_metrics(lowercase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
snake_case_ : Optional[int] = metrics.pop(lowercase__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase__ )
def __UpperCamelCase (self , lowercase__="./" ):
snake_case_ : Any = self.eval_dataset
snake_case_ : Dict = self.get_eval_dataloader(lowercase__ )
snake_case_ : str = next(iter(lowercase__ ) )
# saving device - to make it consistent
snake_case_ : List[str] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
snake_case_ : Any = tuple(v.to(lowercase__ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
snake_case_ : Optional[Any] = True
snake_case_ : Union[str, Any] = self.model.to(lowercase__ )
model.eval()
model.float()
snake_case_ : List[str] = model.module if hasattr(lowercase__ , """module""" ) else model
quant_trainer.configure_model(lowercase__ , self.quant_trainer_args )
snake_case_ : Dict = os.path.join(lowercase__ , """model.onnx""" )
logger.info(f'exporting model to {output_model_file}' )
snake_case_ : Union[str, Any] = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
lowercase__ , lowercase__ , lowercase__ , export_params=lowercase__ , opset_version=13 , do_constant_folding=lowercase__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=lowercase__ , )
logger.info("""onnx export finished""" )
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = VOCAB_FILES_NAMES
_A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_A : str = ["""input_ids""", """attention_mask"""]
_A : Tuple = MBartTokenizer
_A : List[int] = []
_A : List[int] = []
def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
super().__init__(
vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
snake_case_ : Dict = vocab_file
snake_case_ : Optional[int] = False if not self.vocab_file else True
snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case_ : Any = {
lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX"""
snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
snake_case_ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase (self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : List[Any] = [self.sep_token_id]
snake_case_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ : int = src_lang
snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Union[str, Any] = tgt_lang_id
return inputs
def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
snake_case_ : List[str] = src_lang
snake_case_ : int = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def __UpperCamelCase (self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase (self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Tuple = []
snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Optional[int] = []
snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
snake_case_ : List[str] = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
copyfile(self.vocab_file , lowercase__ )
return (out_vocab_file,)
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a_ = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale a pixel-space (left, top, right, bottom) box onto the 0-1000 grid used by LayoutLM-style models."""
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
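# Worked example: on a 200x100-pixel page the word box (20, 10, 60, 30) becomes
# [100, 100, 300, 300] on the 0-1000 grid, since each x is scaled by 1000/width
# and each y by 1000/height before truncating to int.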
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Run Tesseract OCR on an image and return the recognised words together with their normalized boxes."""
snake_case_ : Optional[Any] = tesseract_config if tesseract_config is not None else """"""
# apply OCR
snake_case_ : Optional[int] = to_pil_image(SCREAMING_SNAKE_CASE__ )
snake_case_ , snake_case_ : Union[str, Any] = pil_image.size
snake_case_ : Optional[Any] = pytesseract.image_to_data(SCREAMING_SNAKE_CASE__ , lang=SCREAMING_SNAKE_CASE__ , output_type="""dict""" , config=SCREAMING_SNAKE_CASE__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
snake_case_ : List[Any] = [idx for idx, word in enumerate(SCREAMING_SNAKE_CASE__ ) if not word.strip()]
snake_case_ : Any = [word for idx, word in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
snake_case_ : Optional[int] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
snake_case_ : List[Any] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
snake_case_ : Any = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
snake_case_ : List[Any] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
snake_case_ : List[Any] = []
for x, y, w, h in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Tuple = [x, y, x + w, y + h]
actual_boxes.append(SCREAMING_SNAKE_CASE__ )
# finally, normalize the bounding boxes
snake_case_ : Union[str, Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
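# Usage sketch (requires the Tesseract binary behind pytesseract; "page.png" is
# a hypothetical input):
#
#     words, boxes = apply_tesseract(np.array(PIL.Image.open("page.png")), lang=None, tesseract_config="")
#
# `words` holds the non-empty OCR tokens and `boxes` one normalized
# (left, top, right, bottom) box per word.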
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = ["""pixel_values"""]
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = None , lowercase__ = "" , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24}
snake_case_ : Any = get_size_dict(lowercase__ )
snake_case_ : Optional[int] = do_resize
snake_case_ : Any = size
snake_case_ : Optional[int] = resample
snake_case_ : Optional[int] = apply_ocr
snake_case_ : List[Any] = ocr_lang
snake_case_ : Tuple = tesseract_config
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = None , **lowercase__ , ):
snake_case_ : int = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
snake_case_ : Dict = (size["""height"""], size["""width"""])
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : List[str] = size if size is not None else self.size
snake_case_ : List[str] = get_size_dict(lowercase__ )
snake_case_ : str = resample if resample is not None else self.resample
snake_case_ : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
snake_case_ : str = ocr_lang if ocr_lang is not None else self.ocr_lang
snake_case_ : str = tesseract_config if tesseract_config is not None else self.tesseract_config
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : str = [to_numpy_array(lowercase__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
snake_case_ : Tuple = []
snake_case_ : Tuple = []
for image in images:
snake_case_ , snake_case_ : Optional[int] = apply_tesseract(lowercase__ , lowercase__ , lowercase__ )
words_batch.append(lowercase__ )
boxes_batch.append(lowercase__ )
if do_resize:
snake_case_ : Union[str, Any] = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
snake_case_ : Any = [flip_channel_order(lowercase__ ) for image in images]
snake_case_ : int = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : str = BatchFeature(data={"""pixel_values""": images} , tensor_type=lowercase__ )
if apply_ocr:
snake_case_ : List[str] = words_batch
snake_case_ : Tuple = boxes_batch
return data
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """A pure-Python SHA-1 implementation that imitates hashlib for a bytes input."""
    def __init__(self , data ):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
    @staticmethod
    def rotate(n , b ):
        """Left-rotate the 32-bit integer n by b bits; bits shifted out re-enter on the right."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
    def padding(self ):
        """Pad the message to a multiple of 64 bytes, appending the 0x80 marker and the bit length."""
        padding = b"""\x80""" + b"""\x00""" * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
        return padded_data
    def split_blocks(self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
    def expand_block(self , block ):
        """Expand a 64-byte block into the 80 32-bit words used by the compression loop."""
        w = list(struct.unpack(""">16L""" , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w
    def final_hash(self ):
        """Run the full SHA-1 computation and return the 40-character hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a, b, c, d, e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash():
    """Cross-check the pure-Python digest against hashlib's sha1."""
    msg = b"""Test String"""
    assert SHA1Hash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest()  # noqa: S324
def main():
    """Hash a string or the contents of a file supplied on the command line and print the digest."""
    parser = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    input_string = args.input_string
    # in any case the hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , """utf-8""" )
    print(SHA1Hash(hash_input ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
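# Example invocation (hypothetical script name and output path):
#     python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation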
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[Any] = """trocr"""
_A : Union[str, Any] = ["""past_key_values"""]
_A : str = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__(self , lowercase__=5_02_65 , lowercase__=10_24 , lowercase__=12 , lowercase__=16 , lowercase__=40_96 , lowercase__="gelu" , lowercase__=5_12 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=2 , lowercase__=0.02 , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=True , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : int = d_model
snake_case_ : Any = decoder_layers
snake_case_ : str = decoder_attention_heads
snake_case_ : Optional[Any] = decoder_ffn_dim
snake_case_ : str = activation_function
snake_case_ : Dict = max_position_embeddings
snake_case_ : Dict = dropout
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Dict = activation_dropout
snake_case_ : Tuple = init_std
snake_case_ : Optional[int] = decoder_layerdrop
snake_case_ : Optional[int] = use_cache
snake_case_ : Union[str, Any] = scale_embedding
snake_case_ : Optional[Any] = use_learned_position_embeddings
snake_case_ : Dict = layernorm_embedding
super().__init__(
pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , **lowercase__ , )
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Recursively quicksort a[start:end + 1] in place, returning the number of comparisons made."""
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 100 # 100 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution '''
'''is :'''
)
print(z)
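# A compact, self-contained sketch of the randomized Lomuto partition scheme the
# script above relies on (the obfuscated helpers make the flow hard to follow).
import random

def quick_sort_counted(a, lo, hi):
    """In-place quicksort of a[lo..hi]; returns the number of comparisons made."""
    if lo >= hi:
        return 0
    pivot = random.randint(lo, hi)
    a[pivot], a[hi] = a[hi], a[pivot]          # move the pivot to the end
    count, store = 0, lo - 1
    for i in range(lo, hi):
        count += 1
        if a[i] < a[hi]:                       # smaller than pivot: grow left side
            store += 1
            a[store], a[i] = a[i], a[store]
    a[store + 1], a[hi] = a[hi], a[store + 1]  # place the pivot
    p = store + 1
    return count + quick_sort_counted(a, lo, p - 1) + quick_sort_counted(a, p + 1, hi)

xs = [3, 1, 4, 1, 5, 9, 2, 6]
print(quick_sort_counted(xs, 0, len(xs) - 1), xs)  # comparison count, sorted list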
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
a_ = '''.'''
if __name__ == "__main__":
a_ = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
a_ = []
a_ = []
with open(doctest_file_path) as fp:
for line in fp:
a_ = line.strip()
a_ = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
a_ = '''\n'''.join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
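# A hypothetical repair step for the failure above: rewrite the list file with its
# lines sorted (assumes the same repo layout; not part of the original script).
with open("utils/documentation_tests.txt") as fp:
    lines = sorted(fp)
with open("utils/documentation_tests.txt", "w") as fp:
    fp.writelines(lines)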
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ):
"""simple docstring"""
snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE__ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is lower than the probability p
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE__ )
if not directed:
                # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE__ )
return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return {
i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
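# Self-contained sketch of the G(n, p) construction above; the function name here
# is assumed, since the original identifier is obfuscated.
import random

def gnp(n, p, directed=False):
    graph = {i: [] for i in range(n)}
    for i in range(n):
        for j in range(i + 1, n):
            if random.random() < p:  # keep each candidate edge with probability p
                graph[i].append(j)
                if not directed:
                    graph[j].append(i)
    return graph

random.seed(1)
print(gnp(4, 0.5))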
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __lowercase :
"""simple docstring"""
_A : int = BlenderbotConfig
_A : str = {}
_A : List[Any] = """gelu"""
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , ):
snake_case_ : List[str] = parent
snake_case_ : int = batch_size
snake_case_ : Dict = seq_length
snake_case_ : Optional[Any] = is_training
snake_case_ : Any = use_labels
snake_case_ : List[Any] = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Optional[int] = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : Any = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : Dict = max_position_embeddings
snake_case_ : Optional[Any] = eos_token_id
snake_case_ : Optional[Any] = pad_token_id
snake_case_ : Dict = bos_token_id
def __UpperCamelCase (self ):
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case_ : List[str] = prepare_blenderbot_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
return config, inputs_dict
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : str = TFBlenderbotModel(config=lowercase__ ).get_decoder()
snake_case_ : Dict = inputs_dict["""input_ids"""]
snake_case_ : Optional[Any] = input_ids[:1, :]
snake_case_ : Dict = inputs_dict["""attention_mask"""][:1, :]
snake_case_ : Dict = inputs_dict["""head_mask"""]
snake_case_ : Dict = 1
# first forward pass
snake_case_ : List[str] = model(lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , use_cache=lowercase__ )
snake_case_ , snake_case_ : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case_ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
snake_case_ : Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case_ : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case_ : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0]
snake_case_ : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case_ : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
snake_case_ : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1e-3 )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ):
"""simple docstring"""
if attention_mask is None:
        snake_case_ : List[Any] = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
snake_case_ : Any = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
snake_case_ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : int = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
_A : Optional[Any] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
_A : Optional[Any] = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A : str = True
_A : Optional[Any] = False
_A : Any = False
def __UpperCamelCase (self ):
snake_case_ : Any = TFBlenderbotModelTester(self )
snake_case_ : str = ConfigTester(self , config_class=lowercase__ )
def __UpperCamelCase (self ):
self.config_tester.run_common_tests()
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
@require_tokenizers
@require_tf
class __lowercase ( unittest.TestCase):
"""simple docstring"""
_A : Dict = ["""My friends are cool but they eat too many carbs."""]
_A : int = """facebook/blenderbot-400M-distill"""
@cached_property
def __UpperCamelCase (self ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def __UpperCamelCase (self ):
        snake_case_ : Any = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
@slow
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.tokenizer(self.src_text , return_tensors="""tf""" )
snake_case_ : Tuple = self.model.generate(
model_inputs.input_ids , )
snake_case_ : List[str] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase__ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
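# Hedged sketch of the slow integration test above as a standalone script; it
# downloads the public 400M-distill checkpoint, so it is not a quick check.
from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration

name = "facebook/blenderbot-400M-distill"
tok = BlenderbotTokenizer.from_pretrained(name)
model = TFBlenderbotForConditionalGeneration.from_pretrained(name)
ids = tok(["My friends are cool but they eat too many carbs."], return_tensors="tf").input_ids
print(tok.batch_decode(model.generate(ids), skip_special_tokens=True)[0])
# expected (per the assertion above): " That's unfortunate. Are they trying to
# lose weight or are they just trying to be healthier?"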
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """dpr"""
def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : int = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : List[str] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : Union[str, Any] = projection_dim
snake_case_ : str = position_embedding_type
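# Usage sketch, assuming the class above mirrors transformers' DPRConfig.
from transformers import DPRConfig

cfg = DPRConfig(projection_dim=128)
print(cfg.model_type, cfg.projection_dim)  # dpr 128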
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a_ = 10
a_ = 256
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS:
return None
snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ )
for token in set(SCREAMING_SNAKE_CASE__ ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0}
class __lowercase :
"""simple docstring"""
    def __init__(self , *, lowercase__ = 0.85 , ):
snake_case_ : Tuple = duplication_jaccard_threshold
snake_case_ : Optional[Any] = NUM_PERM
snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
snake_case_ : List[Any] = defaultdict(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : int = self._index.query(lowercase__ )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(lowercase__ , lowercase__ )
if len(lowercase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = []
for base, duplicates in self._duplicate_clusters.items():
snake_case_ : Optional[Any] = [base] + list(lowercase__ )
# reformat the cluster to be a list of dict
snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowercase__ )
return duplicate_clusters
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.get_duplicate_clusters()
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ , snake_case_ : str = element
snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ):
"""simple docstring"""
snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ):
di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
a_ = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
for elementa in cluster:
snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
snake_case_ : Union[str, Any] = 1
extremes.append(SCREAMING_SNAKE_CASE__ )
return extremes
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
global _shared_dataset
snake_case_ : str = dataset
snake_case_ : int = []
snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ):
extremes_list.append(SCREAMING_SNAKE_CASE__ )
return extremes_list
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ):
"""simple docstring"""
snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
snake_case_ : str = {}
snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extremes in extremes_clusters:
for element in extremes:
snake_case_ : int = element
snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
snake_case_ : List[Any] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
return ds_filter, duplicate_clusters
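# A small sketch of the MinHash building block the pipeline above is built on,
# using the same datasketch API (whitespace tokenization here for brevity).
from datasketch import MinHash

def mh(text, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for tok in set(text.split()):
        m.update(tok.encode())
    return m

a = mh("def add(a, b): return a + b")
b = mh("def add(x, y): return x + y")
print(a.jaccard(b))  # estimated Jaccard similarity of the two token sets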
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[Any] = ["""image_processor""", """tokenizer"""]
_A : str = """BlipImageProcessor"""
_A : List[Any] = """AutoTokenizer"""
def __init__(self , lowercase__ , lowercase__ ):
snake_case_ : Any = False
super().__init__(lowercase__ , lowercase__ )
snake_case_ : List[Any] = self.image_processor
def __call__(self , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = 0 , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = True , lowercase__ = None , **lowercase__ , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
snake_case_ : Optional[Any] = self.tokenizer
snake_case_ : int = self.tokenizer(
text=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_token_type_ids=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
return text_encoding
# add pixel_values
snake_case_ : int = self.image_processor(lowercase__ , return_tensors=lowercase__ )
if text is not None:
snake_case_ : Any = self.tokenizer(
text=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_token_type_ids=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
else:
snake_case_ : Union[str, Any] = None
if text_encoding is not None:
encoding_image_processor.update(lowercase__ )
return encoding_image_processor
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __UpperCamelCase (self ):
snake_case_ : str = self.tokenizer.model_input_names
snake_case_ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
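# Hedged usage sketch: the BlipImageProcessor/AutoTokenizer pair above matches
# BLIP-2's processor, so the public entry point is assumed to be Blip2Processor.
from PIL import Image
from transformers import Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
inputs = processor(images=Image.new("RGB", (224, 224)), text="a photo of", return_tensors="pt")
print(list(inputs.keys()))  # pixel_values plus the tokenizer outputs merged via update()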
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
a_ = logging.getLogger(__name__)
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
a_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
a_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
a_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
a_ = [0] * args.vocab_size
for k, v in counter.items():
a_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
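# Toy run of the counting core above, without the pickle round-trip.
from collections import Counter

data = [[5, 7, 7], [7, 9]]  # two "sequences" of token ids
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)
counts = [0] * 10           # vocab_size = 10 for the toy example
for k, v in counter.items():
    counts[k] = v
print(counts)  # [0, 0, 0, 0, 0, 1, 0, 3, 0, 1]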
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Union[str, Any] = IFInpaintingPipeline
_A : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
_A : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_A : str = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __UpperCamelCase (self ):
return self._get_dummy_components()
def __UpperCamelCase (self , lowercase__ , lowercase__=0 ):
if str(lowercase__ ).startswith("""mps""" ):
snake_case_ : Optional[Any] = torch.manual_seed(lowercase__ )
else:
snake_case_ : Optional[Any] = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
snake_case_ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
snake_case_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
snake_case_ : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase (self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase (self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase (self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1 )
def __UpperCamelCase (self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase (self ):
self._test_save_load_local()
def __UpperCamelCase (self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
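# Sketch of the device-dependent seeding used by the dummy-input helper above:
# MPS does not accept a device-bound Generator, so a global seed is used there.
import torch

device = "cpu"
if str(device).startswith("mps"):
    generator = torch.manual_seed(0)
else:
    generator = torch.Generator(device=device).manual_seed(0)
print(torch.rand(2, generator=generator))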
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path / """cache"""
snake_case_ : Optional[int] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : int = {"""text""": """string"""}
snake_case_ : Any = features.copy() if features else default_expected_features
snake_case_ : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Optional[Any] = {"""text""": """string"""}
snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[str] = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : str = [text_path]
snake_case_ : List[str] = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Tuple = tmp_path / """cache"""
    # the "text" column is read with "string" dtype unless features override it
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : int = features.copy() if features else default_expected_features
snake_case_ : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if split:
snake_case_ : Union[str, Any] = {split: text_path}
else:
snake_case_ : Union[str, Any] = """train"""
snake_case_ : int = {"""train""": text_path, """test""": text_path}
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : Tuple = {"""text""": """string"""}
snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
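# The reader under test backs the public "text" builder; an equivalent end-user
# call (given some local my.txt) would be:
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my.txt"})["train"]
print(ds.column_names)  # ['text'] -- one row per line of my.txt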
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
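# Effect of the _LazyModule indirection above: the torch-backed module is only
# imported when an attribute is first accessed.
from transformers import TimeSeriesTransformerConfig  # resolved lazily on access

config = TimeSeriesTransformerConfig(prediction_length=24)
print(config.model_type)  # time_series_transformer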
"""simple docstring"""
from copy import deepcopy
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ = None , lowercase__ = None ):
if arr is None and size is not None:
snake_case_ : str = size
snake_case_ : Optional[Any] = [0] * size
elif arr is not None:
self.init(lowercase__ )
else:
raise ValueError("""Either arr or size must be specified""" )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[Any] = len(lowercase__ )
snake_case_ : int = deepcopy(lowercase__ )
for i in range(1 , self.size ):
snake_case_ : Optional[Any] = self.next_(lowercase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCamelCase (self ):
snake_case_ : Dict = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case_ : Optional[int] = self.next_(lowercase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index + (index & (-index))
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index - (index & (-index))
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case_ : Tuple = self.next_(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
self.add(lowercase__ , value - self.get(lowercase__ ) )
def __UpperCamelCase (self , lowercase__ ):
if right == 0:
return 0
snake_case_ : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case_ : Optional[int] = self.prev(lowercase__ )
return result
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
return self.prefix(lowercase__ ) - self.prefix(lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return self.query(lowercase__ , index + 1 )
def __UpperCamelCase (self , lowercase__ ):
value -= self.tree[0]
if value < 0:
return -1
snake_case_ : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
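# A compact, self-contained reference version of the Fenwick (binary indexed)
# tree above, 1-indexed internally like the original.
class Fenwick:
    def __init__(self, n):
        self.n, self.tree = n, [0] * (n + 1)
    def add(self, i, delta):  # a[i] += delta
        i += 1
        while i <= self.n:
            self.tree[i] += delta
            i += i & -i
    def prefix(self, i):      # sum of a[0..i-1]
        s = 0
        while i > 0:
            s += self.tree[i]
            i -= i & -i
        return s

f = Fenwick(5)
for idx, v in enumerate([1, 2, 3, 4, 5]):
    f.add(idx, v)
print(f.prefix(3))  # 1 + 2 + 3 = 6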
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Tuple = int(SCREAMING_SNAKE_CASE__ )
if decimal in (0, 1): # Exit cases for the recursion
return str(SCREAMING_SNAKE_CASE__ )
snake_case_ , snake_case_ : List[Any] = divmod(SCREAMING_SNAKE_CASE__ , 2 )
return binary_recursive(SCREAMING_SNAKE_CASE__ ) + str(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Optional[int] = str(SCREAMING_SNAKE_CASE__ ).strip()
if not number:
raise ValueError("""No input value was provided""" )
snake_case_ : str = """-""" if number.startswith("""-""" ) else """"""
snake_case_ : Dict = number.lstrip("""-""" )
if not number.isnumeric():
raise ValueError("""Input value is not an integer""" )
return f'{negative}0b{binary_recursive(int(SCREAMING_SNAKE_CASE__ ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
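# Cross-check of the recursion above against Python's built-in binary formatting.
for n in ["6", "-11", "0"]:
    sign = "-" if n.startswith("-") else ""
    print(f"{sign}0b{abs(int(n)):b}")  # 0b110, -0b1011, 0b0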
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ):
"""simple docstring"""
snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
for i in range(1 , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Tuple = collection[i]
snake_case_ : Tuple = 0
snake_case_ : str = i - 1
while low <= high:
snake_case_ : Optional[int] = (low + high) // 2
if val < collection[mid]:
snake_case_ : List[str] = mid - 1
else:
snake_case_ : str = mid + 1
for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ):
snake_case_ : List[str] = collection[j - 1]
snake_case_ : Any = val
return collection
if __name__ == "__main__":
a_ = input('''Enter numbers separated by a comma:\n''').strip()
a_ = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
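# The binary-search step above is what bisect.insort does; a quick cross-check:
import bisect

def binary_insertion_sort_ref(xs):
    out = []
    for x in xs:
        bisect.insort(out, x)  # binary search for the slot, then insert
    return out

print(binary_insertion_sort_ref([5, 2, 9, 1]))  # [1, 2, 5, 9]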
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
a_ = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=None ):
"""simple docstring"""
if rng is None:
snake_case_ : Union[str, Any] = random.Random()
snake_case_ : Optional[Any] = 1
for dim in shape:
total_dims *= dim
snake_case_ : List[Any] = []
for _ in range(SCREAMING_SNAKE_CASE__ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
    snake_case_ : Dict = np.array(SCREAMING_SNAKE_CASE__ , dtype=jnp.int32 ).reshape(SCREAMING_SNAKE_CASE__ )
return output
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=None ):
"""simple docstring"""
snake_case_ : Tuple = ids_tensor(SCREAMING_SNAKE_CASE__ , vocab_size=2 , rng=SCREAMING_SNAKE_CASE__ )
# make sure that at least one token is attended to for each batch
snake_case_ : int = 1
return attn_mask
@require_flax
class __lowercase :
"""simple docstring"""
_A : Optional[int] = None
_A : Tuple = ()
def __UpperCamelCase (self ):
snake_case_ , snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
snake_case_ : List[Any] = 2
snake_case_ : Tuple = inputs["""input_ids"""].shape[-1] // 2
snake_case_ : Tuple = inputs["""input_ids"""][:max_batch_size, :sequence_length]
snake_case_ : int = jnp.ones_like(lowercase__ )
snake_case_ : List[str] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
snake_case_ : Dict = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
snake_case_ : Union[str, Any] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self._get_input_ids_and_config()
snake_case_ : Any = False
snake_case_ : int = max_length
snake_case_ : Dict = 0
for model_class in self.all_generative_model_classes:
snake_case_ : Tuple = model_class(lowercase__ )
snake_case_ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ : Dict = getattr(lowercase__ , lowercase__ )
snake_case_ : Optional[Any] = pt_model_class(lowercase__ ).eval()
snake_case_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase__ , flax_model.params )
snake_case_ : Tuple = flax_model.generate(lowercase__ ).sequences
snake_case_ : Dict = pt_model.generate(torch.tensor(lowercase__ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
snake_case_ : Optional[int] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : str = self._get_input_ids_and_config()
snake_case_ : List[str] = False
snake_case_ : List[Any] = max_length
for model_class in self.all_generative_model_classes:
snake_case_ : Tuple = model_class(lowercase__ )
snake_case_ : Tuple = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : List[str] = jit(model.generate )
snake_case_ : Union[str, Any] = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : str = self._get_input_ids_and_config()
snake_case_ : List[Any] = True
snake_case_ : Tuple = max_length
for model_class in self.all_generative_model_classes:
snake_case_ : Optional[Any] = model_class(lowercase__ )
snake_case_ : List[Any] = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : List[str] = jit(model.generate )
snake_case_ : Optional[int] = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[str] = self._get_input_ids_and_config()
snake_case_ : Tuple = False
snake_case_ : Union[str, Any] = max_length
snake_case_ : Optional[Any] = 2
for model_class in self.all_generative_model_classes:
snake_case_ : Dict = model_class(lowercase__ )
snake_case_ : Optional[int] = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : List[Any] = jit(model.generate )
snake_case_ : Optional[Any] = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = self._get_input_ids_and_config()
snake_case_ : Optional[Any] = False
snake_case_ : Tuple = max_length
snake_case_ : Union[str, Any] = 2
snake_case_ : int = 2
for model_class in self.all_generative_model_classes:
snake_case_ : int = model_class(lowercase__ )
snake_case_ : Optional[int] = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self._get_input_ids_and_config()
snake_case_ : List[str] = True
snake_case_ : List[Any] = max_length
snake_case_ : str = 0.8
snake_case_ : Optional[Any] = 10
snake_case_ : str = 0.3
snake_case_ : str = 1
snake_case_ : Optional[Any] = 8
snake_case_ : int = 9
for model_class in self.all_generative_model_classes:
snake_case_ : int = model_class(lowercase__ )
snake_case_ : Optional[int] = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : Optional[int] = jit(model.generate )
snake_case_ : str = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = self._get_input_ids_and_config()
snake_case_ : str = max_length
snake_case_ : Optional[Any] = 1
snake_case_ : Optional[int] = 8
snake_case_ : Union[str, Any] = 9
for model_class in self.all_generative_model_classes:
snake_case_ : str = model_class(lowercase__ )
snake_case_ : Any = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : Any = jit(model.generate )
snake_case_ : List[str] = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Tuple = self._get_input_ids_and_config()
snake_case_ : List[str] = max_length
snake_case_ : Optional[int] = 2
snake_case_ : List[Any] = 1
snake_case_ : Optional[Any] = 8
snake_case_ : str = 9
for model_class in self.all_generative_model_classes:
snake_case_ : Tuple = model_class(lowercase__ )
snake_case_ : Optional[int] = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : List[Any] = jit(model.generate )
snake_case_ : List[str] = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ : Tuple = attention_mask.at[(0, 0)].set(0 )
snake_case_ : List[str] = False
snake_case_ : Tuple = max_length
for model_class in self.all_generative_model_classes:
snake_case_ : Dict = model_class(lowercase__ )
snake_case_ : Optional[Any] = model.generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : str = jit(model.generate )
snake_case_ : int = jit_generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ : Tuple = attention_mask.at[(0, 0)].set(0 )
snake_case_ : Dict = True
snake_case_ : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
snake_case_ : str = model_class(lowercase__ )
snake_case_ : Dict = model.generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : Tuple = jit(model.generate )
snake_case_ : List[str] = jit_generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ : int = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ : Any = attention_mask.at[(0, 0)].set(0 )
snake_case_ : Tuple = 2
snake_case_ : Any = max_length
for model_class in self.all_generative_model_classes:
snake_case_ : Any = model_class(lowercase__ )
snake_case_ : Union[str, Any] = model.generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : List[str] = jit(model.generate )
snake_case_ : Optional[Any] = jit_generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
snake_case_ : int = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
snake_case_ : List[Any] = """Hello world"""
snake_case_ : Optional[int] = tokenizer(lowercase__ , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowercase__ , """do_samples""" ):
model.generate(lowercase__ , do_samples=lowercase__ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowercase__ , """foo""" ):
snake_case_ : Optional[int] = {"""foo""": """bar"""}
model.generate(lowercase__ , **lowercase__ )
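# Sketch of the jit-compilation pattern the suite above exercises, reusing the
# same tiny public checkpoints (traced on the first call, cached afterwards).
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
input_ids = tok("Hello world", return_tensors="np").input_ids
fast_generate = jit(model.generate)
print(fast_generate(input_ids).sequences.shape)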
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = ["""image_processor""", """tokenizer"""]
_A : str = """ChineseCLIPImageProcessor"""
_A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ):
snake_case_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase__ , )
snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" )
snake_case_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = self.image_processor
def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if images is not None:
snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if text is not None and images is not None:
snake_case_ : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.tokenizer.model_input_names
snake_case_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCamelCase (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , )
return self.image_processor_class
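# Hedged usage sketch; the checkpoint name is an assumption, not part of the code above.
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
batch = processor(text=["一只猫"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
print(sorted(batch.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids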
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : str = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
snake_case_ : Tuple = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
snake_case_ : int = {}
for k, v in state_dict.items():
if "pred_layer" in k:
snake_case_ : Any = v
else:
snake_case_ : Optional[int] = v
snake_case_ : int = chkpt["""params"""]
snake_case_ : List[Any] = {n: v for n, v in config.items() if not isinstance(SCREAMING_SNAKE_CASE__ , (torch.FloatTensor, numpy.ndarray) )}
snake_case_ : List[str] = chkpt["""dico_word2id"""]
snake_case_ : Any = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 1_3 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
# Save pytorch-model
snake_case_ : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
snake_case_ : int = pytorch_dump_folder_path + """/""" + CONFIG_NAME
snake_case_ : Union[str, Any] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , indent=2 ) + """\n""" )
    print(f'Save vocab file to {pytorch_vocab_dump_path}' )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , indent=2 ) + """\n""" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a_ = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
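# Hypothetical invocation (script filename and paths are placeholders):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./xlm_checkpoint.pth \
#       --pytorch_dump_folder_path ./xlm_pytorch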
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    """Build an adjacency dict {node: [[neighbour, distance], ...]} from an edge-list file."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour, starting from the first character in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """All tours obtained by swapping two interior nodes, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Move to the best non-tabu neighbour each iteration, keeping the best tour seen so far."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)

    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
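# A hedged end-to-end sketch (file name and edge weights are hypothetical): the
# input is a whitespace-separated edge list, one "node node distance" triple
# per line, and the first character of the file doubles as the start node.
def _demo_tabu_search(path="tabu_demo.txt"):
    edges = ["a b 20", "a c 18", "a d 22", "b c 10", "b d 11", "c d 12"]
    with open(path, "w") as f:
        f.write("\n".join(edges))
    neighbours = generate_neighbours(path)
    first_solution, distance = generate_first_solution(path, neighbours)
    return tabu_search(first_solution, distance, neighbours, iters=4, size=3)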
| 48
| 1
|
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and the duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run, paging through the API."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
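# A small self-contained check (synthetic timestamps, not real API output) of
# the duration arithmetic in `extract_time_from_single_job`.
def _demo_duration():
    job = {"started_at": "2023-01-01T10:00:00Z", "completed_at": "2023-01-01T10:12:00Z"}
    assert extract_time_from_single_job(job)["duration"] == 12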
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """rag"""
_A : Optional[Any] = True
def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
snake_case_ : List[str] = kwargs.pop("""generator""" )
snake_case_ : List[str] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : int = reduce_loss
snake_case_ : Optional[int] = label_smoothing
snake_case_ : Dict = exclude_bos_score
snake_case_ : Union[str, Any] = do_marginalize
snake_case_ : Union[str, Any] = title_sep
snake_case_ : int = doc_sep
snake_case_ : int = n_docs
snake_case_ : List[str] = max_combined_length
snake_case_ : Tuple = dataset
snake_case_ : int = dataset_split
snake_case_ : str = index_name
snake_case_ : List[str] = retrieval_vector_size
snake_case_ : Dict = retrieval_batch_size
snake_case_ : str = passages_path
snake_case_ : Union[str, Any] = index_path
snake_case_ : Tuple = use_dummy_dataset
snake_case_ : Dict = output_retrieved
snake_case_ : str = do_deduplication
snake_case_ : Any = use_cache
if self.forced_eos_token_id is None:
snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )
@classmethod
def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.question_encoder.to_dict()
snake_case_ : Dict = self.generator.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
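# A hedged usage sketch: upstream, this composed config is exposed as
# `transformers.RagConfig` and is built from two sub-configs via the
# classmethod above (names below assume the upstream API).
def _demo_rag_config():
    from transformers import BartConfig, DPRConfig, RagConfig

    rag_config = RagConfig.from_question_encoder_generator_configs(
        DPRConfig(), BartConfig(), n_docs=5, index_name="exact"
    )
    d = rag_config.to_dict()
    assert d["model_type"] == "rag" and "question_encoder" in d and "generator" in d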
| 48
| 1
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
a_ = 3
def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
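# A toy worked example (tiny insecure numbers; the companion encryption script
# is not part of this file): with public key (e_1, e_2 = (e_1**d)^-1 mod p, p)
# and private exponent d, a message m encrypts to (c1, c2) = (e_1**r % p,
# m * e_2**r % p) and decrypts as m = c2 * c1**d % p, since the e_1**(d*r)
# factors cancel.
def _demo_elgamal_roundtrip():
    p, e_1, d, r, m = 23, 5, 6, 3, 17  # 5 is a primitive root modulo 23
    e_2 = pow(pow(e_1, d, p), -1, p)   # modular inverse via pow(..., -1, p)
    c1, c2 = pow(e_1, r, p), (m * pow(e_2, r, p)) % p
    assert (c2 * pow(c1, d, p)) % p == m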
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Tuple = backbone_config.get("""model_type""" )
snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
snake_case_ : List[Any] = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = initializer_range
snake_case_ : str = pool_scales
snake_case_ : Dict = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : List[str] = auxiliary_in_channels
snake_case_ : Optional[Any] = auxiliary_channels
snake_case_ : Any = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : List[str] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : Any = self.__class__.model_type
return output
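# A hedged usage sketch (assuming this class is exposed as `UperNetConfig`, as
# upstream): with `backbone_config=None` the default ResNet backbone is used,
# and `to_dict()` re-serializes the nested backbone config.
def _demo_upernet_config():
    from transformers import UperNetConfig

    d = UperNetConfig().to_dict()
    assert d["model_type"] == "upernet" and d["backbone_config"]["model_type"] == "resnet"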
| 48
| 1
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
a_ = '''>>zh<<'''
a_ = '''Helsinki-NLP/'''
if is_torch_available():
a_ = '''pt'''
elif is_tf_available():
a_ = '''tf'''
else:
a_ = '''jax'''
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : str = MarianTokenizer
_A : List[str] = False
_A : List[str] = True
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : Any = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase (self , **lowercase__ ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = """</s>"""
snake_case_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def __UpperCamelCase (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
snake_case_ : Tuple = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : List[str] = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __UpperCamelCase (self ):
# fmt: off
snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
snake_case_ : Dict = """Tämä on testi"""
snake_case_ : List[Any] = """This is a test"""
snake_case_ : Optional[int] = [76, 7, 20_47, 2]
snake_case_ : List[str] = [69, 12, 11, 9_40, 2]
snake_case_ : Any = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 48
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a_ = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__=-1 ):
# in NER datasets, the last column is usually reserved for NER label
snake_case_ : Union[str, Any] = label_idx
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[str] = mode.value
snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : Any = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
snake_case_ : str = []
snake_case_ : List[Any] = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
snake_case_ : Optional[Any] = []
snake_case_ : int = []
else:
snake_case_ : Optional[Any] = line.split(""" """ )
words.append(splits[0] )
if len(lowercase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : str = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(lowercase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(lowercase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Dict = f.read().splitlines()
if "O" not in labels:
snake_case_ : List[Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Any = f.read().splitlines()
if "O" not in labels:
snake_case_ : Tuple = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[Any] = mode.value
snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : str = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(lowercase__ ):
snake_case_ : Tuple = []
snake_case_ : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(lowercase__ ) == len(lowercase__ )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = 0
for sentence in parse_incr(lowercase__ ):
snake_case_ : int = preds_list[example_id]
snake_case_ : Dict = """"""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowercase__ )
example_id += 1
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
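# A minimal sketch (synthetic CoNLL-style data, hypothetical) of the line
# format the NER reader above expects: token in the first column, label in
# column `label_idx` (the last column for NER, second-to-last for chunking),
# and blank lines separating sentences.
_CONLL_SAMPLE = """John B-PER
lives O

Berlin B-LOC
"""
# The first sentence parses to words=["John", "lives"], labels=["B-PER", "O"];
# the POS reader instead consumes CoNLL-U input via `parse_incr`.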
| 48
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : List[str] = """Salesforce/blip-image-captioning-base"""
_A : List[Any] = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
_A : Optional[int] = """image_captioner"""
    _A : Union[str, Any] = AutoModelForVision2Seq
_A : Any = ["""image"""]
_A : Dict = ["""text"""]
def __init__(self , *lowercase__ , **lowercase__ ):
requires_backends(self , ["""vision"""] )
super().__init__(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return self.pre_processor(images=lowercase__ , return_tensors="""pt""" )
def __UpperCamelCase (self , lowercase__ ):
return self.model.generate(**lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return self.pre_processor.batch_decode(lowercase__ , skip_special_tokens=lowercase__ )[0].strip()
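# A hedged usage sketch (assumes the upstream agents/tools API, where this
# class is exposed as `ImageCaptioningTool`, and a local image file):
def _demo_image_captioner():
    from PIL import Image

    tool = ImageCaptioningTool()             # name assumed from upstream
    caption = tool(Image.open("photo.jpg"))  # hypothetical image path
    print(caption)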
| 48
|
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random bases."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
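# A worked sketch of the decomposition above: num - 1 = 2**t * s with s odd.
# For num = 221 (= 13 * 17), num - 1 = 220 = 2**2 * 55, so s = 55 and t = 2,
# and most random bases quickly expose 221 as composite.
def _demo_decompose(num=221):
    s, t = num - 1, 0
    while s % 2 == 0:
        s, t = s // 2, t + 1
    assert (s, t) == (55, 2) and 2**t * s == num - 1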
def is_prime_low_num(num: int) -> bool:
    """Trial division against small primes, then fall back to rabin_miller."""
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
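# A quick sanity sketch: the low_primes list only filters small factors;
# rabin_miller does the probabilistic work for large candidates.
def _demo_is_prime():
    assert is_prime_low_num(97)          # hits the low_primes fast path
    assert not is_prime_low_num(95)      # divisible by 5
    assert is_prime_low_num(2**61 - 1)   # Mersenne prime, exercises rabin_miller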
| 48
| 1
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Randomized in-place quicksort; returns the number of comparisons made."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
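# A deterministic sanity sketch: the counter tallies one comparison per element
# scanned in each partition, so even a tiny 3-element array needs at least n - 1.
def _demo_quick_sort_count():
    arr = [3, 1, 2]
    comparisons = _in_place_quick_sort(arr, 0, len(arr) - 1)
    assert arr == [1, 2, 3] and comparisons >= len(arr) - 1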
| 48
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = """deberta-v2"""
def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Optional[int] = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = relative_attention
snake_case_ : Dict = max_relative_positions
snake_case_ : Optional[int] = pad_token_id
snake_case_ : List[str] = position_biased_input
# Backwards compatibility
if type(lowercase__ ) == str:
snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )]
snake_case_ : Optional[int] = pos_att_type
snake_case_ : List[str] = vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ )
snake_case_ : List[str] = pooler_dropout
snake_case_ : int = pooler_hidden_act
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@property
def __UpperCamelCase (self ):
if self.task == "multiple-choice":
snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : int = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def __UpperCamelCase (self ):
return 12
def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ):
snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
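# A hedged sketch (assuming the upstream names `DebertaV2Config` and
# `DebertaV2OnnxConfig`): with type_vocab_size == 0 the ONNX input spec drops
# token_type_ids, mirroring the `generate_dummy_inputs` override above.
def _demo_onnx_inputs():
    from transformers import DebertaV2Config
    from transformers.models.deberta_v2.configuration_deberta_v2 import DebertaV2OnnxConfig

    onnx_config = DebertaV2OnnxConfig(DebertaV2Config(type_vocab_size=0))
    assert "token_type_ids" not in onnx_config.inputs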
| 48
| 1
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 48
|
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if the two integers have opposite signs.
    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
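# Why XOR works here: for two's-complement integers, the sign bit of
# num1 ^ num2 is the XOR of the operands' sign bits, so the result is
# negative exactly when the signs differ.
def _demo_sign_bit():
    assert different_signs(10, -3) and different_signs(-10, 3)
    assert not different_signs(7, 4) and not different_signs(-7, -4)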
| 48
| 1
|
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex
    number made from this x-y pair diverges. Members of the Mandelbrot set
    do not diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coding: the Mandelbrot set is black, everything else white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Hue-based coding of the relative distance; the Mandelbrot set stays black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Generate an image of the Mandelbrot set."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
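# A hedged zoom sketch (coordinates are illustrative): figure_center_x/_y pan
# the viewport and figure_width zooms it; deeper zooms need a larger max_step
# to keep boundary detail.
def _demo_zoom():
    return get_image(figure_center_x=-0.75, figure_center_y=0.1, figure_width=0.2, max_step=100)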
| 48
| 1
|
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
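# Rendering note (file name and scene name are hypothetical): assuming this
# scene is saved as stage.py under a concrete class name, manim renders it with
#   manim -pql stage.py SceneName
# where -p previews the result and -ql picks low quality for fast iteration.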
| 48
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_A : ClassVar[Features] = Features({"""audio""": Audio()})
_A : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_A : str = "audio"
_A : str = "transcription"
def __UpperCamelCase (self , lowercase__ ):
if self.audio_column not in features:
raise ValueError(f'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , lowercase__ ):
raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
snake_case_ : Optional[int] = copy.deepcopy(self )
snake_case_ : Tuple = self.input_schema.copy()
snake_case_ : List[str] = features[self.audio_column]
snake_case_ : Any = input_schema
return task_template
@property
def __UpperCamelCase (self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
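# A hedged usage sketch (assuming the upstream name
# `datasets.tasks.AutomaticSpeechRecognition`): the template maps arbitrary
# dataset columns onto the canonical "audio"/"transcription" schema.
def _demo_asr_template():
    from datasets.tasks import AutomaticSpeechRecognition

    template = AutomaticSpeechRecognition(audio_column="recording", transcription_column="text")
    assert template.column_mapping == {"recording": "audio", "text": "transcription"}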
| 48
| 1
|
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """Min-heap priority queue that also supports updating an item's priority."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item already in the queue
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : TPos ):
"""simple docstring"""
snake_case_ : Optional[Any] = np.array(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = np.array(SCREAMING_SNAKE_CASE__ )
return np.linalg.norm(a - b )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : TPos ):
"""simple docstring"""
return consistent_heuristic(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) // t
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : TPos ):
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : dict[TPos, float] ):
"""simple docstring"""
snake_case_ : int = g_function[start] + Wa * heuristics[i](SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return ans
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = np.chararray((n, n) )
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
snake_case_ : Dict = """*"""
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if (j, (n - 1) - i) in blocks:
snake_case_ : Dict = """#"""
snake_case_ : Dict = """-"""
snake_case_ : List[Any] = back_pointer[goal]
while x != start:
((snake_case_) , (snake_case_)) : Union[str, Any] = x
# print(x)
snake_case_ : List[Any] = """-"""
snake_case_ : Optional[Any] = back_pointer[x]
snake_case_ : Optional[int] = """-"""
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
snake_case_ : int = back_pointer[goal]
while x != start:
print(SCREAMING_SNAKE_CASE__ , end=""" """ )
snake_case_ : int = back_pointer[x]
print(SCREAMING_SNAKE_CASE__ )
sys.exit()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , ):
"""simple docstring"""
for itera in range(SCREAMING_SNAKE_CASE__ ):
open_list[itera].remove_element(SCREAMING_SNAKE_CASE__ )
# print("s", s)
# print("j", j)
((snake_case_) , (snake_case_)) : Optional[Any] = s
snake_case_ : Union[str, Any] = (x - 1, y)
snake_case_ : Any = (x + 1, y)
snake_case_ : Tuple = (x, y + 1)
snake_case_ : int = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(SCREAMING_SNAKE_CASE__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = -1
snake_case_ : Union[str, Any] = float("""inf""" )
if valid(SCREAMING_SNAKE_CASE__ ) and g_function[neighbours] > g_function[s] + 1:
snake_case_ : Optional[Any] = g_function[s] + 1
snake_case_ : Tuple = s
if neighbours not in close_list_anchor:
open_list[0].put(SCREAMING_SNAKE_CASE__ , key(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if neighbours not in close_list_inad:
for var in range(1 , SCREAMING_SNAKE_CASE__ ):
if key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) <= Wa * key(
SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
open_list[j].put(
SCREAMING_SNAKE_CASE__ , key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : int = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
a_ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ = make_common_ground()
a_ = blocks_blk
# hyper parameters
a_ = 1
a_ = 1
a_ = 20
a_ = 3 # one consistent and two other inconsistent
# start and end destination
a_ = (0, 0)
a_ = (n - 1, n - 1)
a_ = 1
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Tuple = {start: 0, goal: float("""inf""" )}
snake_case_ : Tuple = {start: -1, goal: -1}
snake_case_ : int = []
snake_case_ : str = set()
for i in range(SCREAMING_SNAKE_CASE__ ):
open_list.append(PriorityQueue() )
open_list[i].put(SCREAMING_SNAKE_CASE__ , key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case_ : list[int] = []
snake_case_ : list[int] = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , SCREAMING_SNAKE_CASE__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
                    snake_case_ : Dict = open_list[i].top_show()
visited.add(SCREAMING_SNAKE_CASE__ )
expand_state(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
close_list_inad.append(SCREAMING_SNAKE_CASE__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case_ : Optional[int] = open_list[0].top_show()
visited.add(SCREAMING_SNAKE_CASE__ )
expand_state(
SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
close_list_anchor.append(SCREAMING_SNAKE_CASE__ )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 48
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = ["""pixel_values"""]
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : Union[str, Any] = do_resize
snake_case_ : List[str] = size
snake_case_ : str = crop_pct
snake_case_ : str = resample
snake_case_ : Optional[Any] = do_center_crop
snake_case_ : Dict = crop_size
snake_case_ : int = do_rescale
snake_case_ : Optional[int] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
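            # Resize to size / crop_pct so that a subsequent center crop of
            # `size` keeps the central crop_pct fraction of the image.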
if "shortest_edge" in size:
snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
snake_case_ : Dict = int(size["""height"""] / crop_pct )
else:
snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
else:
if "shortest_edge" in size:
snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
elif "height" in size and "width" in size:
snake_case_ : int = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
snake_case_ : int = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ : List[Any] = resample if resample is not None else self.resample
snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : int = image_std if image_std is not None else self.image_std
snake_case_ : List[Any] = size if size is not None else self.size
snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 48
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = VOCAB_FILES_NAMES
_A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_A : str = ["""input_ids""", """attention_mask"""]
_A : Tuple = MBartTokenizer
_A : List[int] = []
_A : List[int] = []
def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
super().__init__(
vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
snake_case_ : Dict = vocab_file
snake_case_ : Optional[int] = False if not self.vocab_file else True
snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case_ : Any = {
lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX"""
snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
snake_case_ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase (self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : List[Any] = [self.sep_token_id]
snake_case_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ : int = src_lang
snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Union[str, Any] = tgt_lang_id
return inputs
def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
snake_case_ : List[str] = src_lang
snake_case_ : int = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def __UpperCamelCase (self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase (self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase (self , lowercase__ ):
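        # mBART source-side format: no prefix tokens; the suffix is
        # [eos, src_lang_code].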
snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Tuple = []
snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Optional[int] = []
snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
snake_case_ : List[str] = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
copyfile(self.vocab_file , lowercase__ )
return (out_vocab_file,)
| 48
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 48
|
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ ):
snake_case_ : Union[str, Any] = data
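        # Initial SHA-1 state words h0..h4, as specified in FIPS 180-1.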
snake_case_ : List[str] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0]
@staticmethod
def __UpperCamelCase (lowercase__ , lowercase__ ):
return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f
def __UpperCamelCase (self ):
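        # SHA-1 padding: append 0x80, zero-fill so the length is 56 mod 64,
        # then append the original message length in bits as a big-endian
        # 64-bit integer.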
snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def __UpperCamelCase (self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __UpperCamelCase (self , lowercase__ ):
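        # Expand the 16 words of the block into the 80-word message schedule:
        # w[i] = rotl(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1).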
snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64
for i in range(16 , 80 ):
snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.padding()
snake_case_ : Any = self.split_blocks()
for block in self.blocks:
snake_case_ : Any = self.expand_block(lowercase__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h
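            # 80 compression rounds; the round function f and the constant k
            # change every 20 rounds.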
for i in range(0 , 80 ):
if 0 <= i < 20:
snake_case_ : Optional[Any] = (b & c) | ((~b) & d)
snake_case_ : List[str] = 0X5_a_8_2_7_9_9_9
elif 20 <= i < 40:
snake_case_ : Union[str, Any] = b ^ c ^ d
snake_case_ : Tuple = 0X6_e_d_9_e_b_a_1
elif 40 <= i < 60:
snake_case_ : str = (b & c) | (b & d) | (c & d)
snake_case_ : List[str] = 0X8_f_1_b_b_c_d_c
elif 60 <= i < 80:
snake_case_ : Tuple = b ^ c ^ d
snake_case_ : str = 0Xc_a_6_2_c_1_d_6
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = (
self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f,
a,
self.rotate(lowercase__ , 30 ),
c,
d,
)
snake_case_ : Any = (
self.h[0] + a & 0Xf_f_f_f_f_f_f_f,
self.h[1] + b & 0Xf_f_f_f_f_f_f_f,
self.h[2] + c & 0Xf_f_f_f_f_f_f_f,
self.h[3] + d & 0Xf_f_f_f_f_f_f_f,
self.h[4] + e & 0Xf_f_f_f_f_f_f_f,
)
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Union[str, Any] = b"""Test String"""
assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
snake_case_ : Optional[int] = parser.parse_args()
snake_case_ : Optional[int] = args.input_string
    # In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
snake_case_ : List[str] = f.read()
else:
snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" )
print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 48
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if num <= 0:
snake_case_ : Tuple = f'{num}: Invalid input, please enter a positive integer.'
raise ValueError(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = [True] * (num + 1)
snake_case_ : List[Any] = []
snake_case_ : Union[str, Any] = 2
snake_case_ : List[str] = int(math.sqrt(SCREAMING_SNAKE_CASE__ ) )
while start <= end:
# If start is a prime
        if sieve[start]:
prime.append(SCREAMING_SNAKE_CASE__ )
        # Set multiples of start to False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE__ ):
            if sieve[i]:
snake_case_ : Union[str, Any] = False
start += 1
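    # Everything above sqrt(num) that survived the sieve is prime; collect
    # the remaining entries.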
for j in range(end + 1 , num + 1 ):
        if sieve[j]:
prime.append(SCREAMING_SNAKE_CASE__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 48
|
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
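        # Two animation phases: first grow a colored fill over each
        # checkpoint block, then move a copy of each fill into the next free
        # CPU slot.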
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
| 48
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_A : ClassVar[Features] = Features({"""audio""": Audio()})
_A : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_A : str = "audio"
_A : str = "transcription"
def __UpperCamelCase (self , lowercase__ ):
if self.audio_column not in features:
raise ValueError(f'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , lowercase__ ):
raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
snake_case_ : Optional[int] = copy.deepcopy(self )
snake_case_ : Tuple = self.input_schema.copy()
snake_case_ : List[str] = features[self.audio_column]
snake_case_ : Any = input_schema
return task_template
@property
def __UpperCamelCase (self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 48
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
if start < end:
snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = a[end]
snake_case_ : Dict = a[pivot]
snake_case_ : Any = temp
snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ )
return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
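    # Swap a randomly chosen pivot to the end, then run a Lomuto-style
    # partition, counting one comparison per scanned element.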
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 100 # 100 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution '''
    '''is :'''
)
print(z)
| 48
| 1
|
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 48
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ):
"""simple docstring"""
snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE__ )
    # if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is lower than the probability
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE__ )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE__ )
return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return {
i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
| 1
|
"""simple docstring"""
# Imports
import numpy as np
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None ):
self.set_matricies(red=lowercase__ , green=lowercase__ , blue=lowercase__ , red_edge=lowercase__ , nir=lowercase__ )
def __UpperCamelCase (self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None ):
if red is not None:
snake_case_ : int = red
if green is not None:
snake_case_ : List[Any] = green
if blue is not None:
snake_case_ : Tuple = blue
if red_edge is not None:
snake_case_ : Any = red_edge
if nir is not None:
snake_case_ : Optional[int] = nir
return True
def __UpperCamelCase (self , lowercase__="" , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None ):
self.set_matricies(red=lowercase__ , green=lowercase__ , blue=lowercase__ , red_edge=lowercase__ , nir=lowercase__ )
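        # Dispatch table mapping vegetation-index names to their implementations.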
snake_case_ : Union[str, Any] = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def __UpperCamelCase (self ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __UpperCamelCase (self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __UpperCamelCase (self ):
return self.nir * (self.red / (self.green**2))
def __UpperCamelCase (self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __UpperCamelCase (self ):
return (self.nir - self.red) / (self.nir + self.red)
def __UpperCamelCase (self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def __UpperCamelCase (self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __UpperCamelCase (self ):
return (self.nir - self.green) / (self.nir + self.green)
def __UpperCamelCase (self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __UpperCamelCase (self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __UpperCamelCase (self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __UpperCamelCase (self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __UpperCamelCase (self , lowercase__=0.08 , lowercase__=1.22 , lowercase__=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __UpperCamelCase (self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __UpperCamelCase (self ):
return (self.nir / self.green) - 1
def __UpperCamelCase (self ):
return (self.nir / self.redEdge) - 1
def __UpperCamelCase (self ):
return (self.red - self.blue) / self.red
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __UpperCamelCase (self ):
return self.nir - self.green
def __UpperCamelCase (self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __UpperCamelCase (self ):
snake_case_ : List[Any] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def __UpperCamelCase (self , lowercase__=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def __UpperCamelCase (self , lowercase__=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __UpperCamelCase (self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def __UpperCamelCase (self , lowercase__=None , lowercase__=None ):
return (self.nir - b) / (a * self.red)
def __UpperCamelCase (self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __UpperCamelCase (self ):
return (self.red + self.green + self.blue) / 30.5
def __UpperCamelCase (self ):
return self.nir / self.red
def __UpperCamelCase (self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def __UpperCamelCase (self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __UpperCamelCase (self ):
return self.green / (self.nir + self.red + self.green)
def __UpperCamelCase (self ):
return self.nir / (self.nir + self.red + self.green)
def __UpperCamelCase (self ):
return self.red / (self.nir + self.red + self.green)
def __UpperCamelCase (self ):
return (self.green - self.red) / (self.green + self.red)
def __UpperCamelCase (self ):
return (self.red - self.green) / (self.red + self.green)
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
snake_case_ : Optional[Any] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def __UpperCamelCase (self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __UpperCamelCase (self ):
return self.nir / self.red
def __UpperCamelCase (self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def __UpperCamelCase (self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 48
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """dpr"""
def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : int = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : List[str] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : Union[str, Any] = projection_dim
snake_case_ : str = position_embedding_type
| 48
| 1
|
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : int = fname.split(os.path.sep )[-1]
return re.search(R"""^(.*)_\d+\.jpg$""" , SCREAMING_SNAKE_CASE__ ).groups()[0]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__=None , lowercase__=None ):
snake_case_ : Union[str, Any] = file_names
snake_case_ : Tuple = image_transform
snake_case_ : Union[str, Any] = label_to_id
def __len__(self ):
return len(self.file_names )
def __getitem__(self , lowercase__ ):
snake_case_ : Optional[Any] = self.file_names[idx]
snake_case_ : Dict = PIL.Image.open(lowercase__ )
snake_case_ : Union[str, Any] = raw_image.convert("""RGB""" )
if self.image_transform is not None:
snake_case_ : Union[str, Any] = self.image_transform(lowercase__ )
snake_case_ : Dict = extract_label(lowercase__ )
if self.label_to_id is not None:
snake_case_ : Union[str, Any] = self.label_to_id[label]
return {"image": image, "label": label}
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
if args.with_tracking:
snake_case_ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
snake_case_ : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : Tuple = config["""lr"""]
snake_case_ : Any = int(config["""num_epochs"""] )
snake_case_ : str = int(config["""seed"""] )
snake_case_ : Any = int(config["""batch_size"""] )
snake_case_ : Any = config["""image_size"""]
if not isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
snake_case_ : Dict = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , """isdigit""" ):
if args.checkpointing_steps == "epoch":
snake_case_ : List[str] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
snake_case_ : Any = int(args.checkpointing_steps )
else:
raise ValueError(
f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
else:
snake_case_ : Dict = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
snake_case_ : Any = os.path.split(SCREAMING_SNAKE_CASE__ )[-1].split(""".""" )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Grab all the image filenames
snake_case_ : Any = [os.path.join(args.data_dir , SCREAMING_SNAKE_CASE__ ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )]
# Build the label correspondences
snake_case_ : int = [extract_label(SCREAMING_SNAKE_CASE__ ) for fname in file_names]
snake_case_ : List[str] = list(set(SCREAMING_SNAKE_CASE__ ) )
id_to_label.sort()
snake_case_ : str = {lbl: i for i, lbl in enumerate(SCREAMING_SNAKE_CASE__ )}
# Set the seed before splitting the data.
np.random.seed(SCREAMING_SNAKE_CASE__ )
torch.manual_seed(SCREAMING_SNAKE_CASE__ )
torch.cuda.manual_seed_all(SCREAMING_SNAKE_CASE__ )
# Split our filenames between train and validation
snake_case_ : Union[str, Any] = np.random.permutation(len(SCREAMING_SNAKE_CASE__ ) )
snake_case_ : Dict = int(0.8 * len(SCREAMING_SNAKE_CASE__ ) )
snake_case_ : Union[str, Any] = random_perm[:cut]
snake_case_ : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
snake_case_ : Any = Compose([RandomResizedCrop(SCREAMING_SNAKE_CASE__ , scale=(0.5, 1.0) ), ToTensor()] )
snake_case_ : Tuple = PetsDataset(
[file_names[i] for i in train_split] , image_transform=SCREAMING_SNAKE_CASE__ , label_to_id=SCREAMING_SNAKE_CASE__ )
# For evaluation, we use a deterministic Resize
snake_case_ : Tuple = Compose([Resize(SCREAMING_SNAKE_CASE__ ), ToTensor()] )
snake_case_ : str = PetsDataset([file_names[i] for i in eval_split] , image_transform=SCREAMING_SNAKE_CASE__ , label_to_id=SCREAMING_SNAKE_CASE__ )
# Instantiate dataloaders.
snake_case_ : int = DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , num_workers=4 )
snake_case_ : Optional[int] = DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , num_workers=4 )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
snake_case_ : Tuple = create_model("""resnet50d""" , pretrained=SCREAMING_SNAKE_CASE__ , num_classes=len(SCREAMING_SNAKE_CASE__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Tuple = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
snake_case_ : Any = False
for param in model.get_classifier().parameters():
snake_case_ : List[str] = True
    # We normalize the batches of images on-device ourselves, which is a bit faster.
snake_case_ : Dict = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device )
snake_case_ : Any = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
snake_case_ : str = torch.optim.Adam(params=model.parameters() , lr=lr / 2_5 )
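    # Starting Adam at lr / 25 matches OneCycleLR's default div_factor of 25
    # (initial_lr = max_lr / div_factor).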
# Instantiate learning rate scheduler
snake_case_ : Dict = OneCycleLR(optimizer=SCREAMING_SNAKE_CASE__ , max_lr=SCREAMING_SNAKE_CASE__ , epochs=SCREAMING_SNAKE_CASE__ , steps_per_epoch=len(SCREAMING_SNAKE_CASE__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
snake_case_ : Optional[Any] = 0
# We also need to keep track of the starting epoch so files are named properly
snake_case_ : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}' )
accelerator.load_state(args.resume_from_checkpoint )
snake_case_ : Tuple = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
snake_case_ : List[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
snake_case_ : int = dirs[-1] # Folders are sorted by date modified; the most recent checkpoint is last
# Extract `epoch_{i}` or `step_{i}`
snake_case_ : Union[str, Any] = os.path.splitext(SCREAMING_SNAKE_CASE__ )[0]
if "epoch" in training_difference:
snake_case_ : Tuple = int(training_difference.replace("""epoch_""" , """""" ) ) + 1
snake_case_ : str = None
else:
snake_case_ : Tuple = int(training_difference.replace("""step_""" , """""" ) )
snake_case_ : Optional[Any] = resume_step // len(SCREAMING_SNAKE_CASE__ )
resume_step -= starting_epoch * len(SCREAMING_SNAKE_CASE__ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
model.train()
if args.with_tracking:
snake_case_ : Dict = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
snake_case_ : Tuple = accelerator.skip_first_batches(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
snake_case_ : Dict = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
snake_case_ : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
snake_case_ : List[str] = (batch["""image"""] - mean) / std
snake_case_ : int = model(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = torch.nn.functional.cross_entropy(SCREAMING_SNAKE_CASE__ , batch["""label"""] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(SCREAMING_SNAKE_CASE__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Dict = f'step_{overall_step}'
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
snake_case_ : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case_ : List[str] = 0
snake_case_ : Any = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
snake_case_ : List[str] = {k: v.to(accelerator.device ) for k, v in batch.items()}
snake_case_ : Optional[Any] = (batch["""image"""] - mean) / std
with torch.no_grad():
snake_case_ : int = model(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = outputs.argmax(dim=-1 )
snake_case_ , snake_case_ : Optional[int] = accelerator.gather_for_metrics((predictions, batch["""label"""]) )
snake_case_ : Dict = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
snake_case_ : Tuple = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}: {1_0_0 * eval_metric:.2f}' )
if args.with_tracking:
accelerator.log(
{
"""accuracy""": 1_0_0 * eval_metric,
"""train_loss""": total_loss.item() / len(SCREAMING_SNAKE_CASE__ ),
"""epoch""": epoch,
} , step=SCREAMING_SNAKE_CASE__ , )
if checkpointing_steps == "epoch":
snake_case_ : Dict = f'epoch_{epoch}'
if args.output_dir is not None:
snake_case_ : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
if args.with_tracking:
accelerator.end_training()
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument("""--data_dir""" , required=SCREAMING_SNAKE_CASE__ , help="""The data folder on disk.""" )
parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" )
parser.add_argument(
"""--mixed_precision""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
""" between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"""
""" and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--checkpointing_steps""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , )
parser.add_argument(
"""--output_dir""" , type=SCREAMING_SNAKE_CASE__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=SCREAMING_SNAKE_CASE__ , default="""logs""" , help="""Where to store experiment tracking logs and relevant project information""" , )
snake_case_ : Tuple = parser.parse_args()
snake_case_ : Dict = {"""lr""": 3E-2, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 6_4, """image_size""": 2_2_4}
training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
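# A minimal launch sketch for this script, assuming `accelerate config` has been
# run once (the script filename and data path below are hypothetical):
#
#   accelerate launch cv_example.py --data_dir ./images --mixed_precision fp16 \
#       --checkpointing_steps epoch --output_dir ./checkpoints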
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a_ = 10
a_ = 256
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS:
return None
snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ )
for token in set(SCREAMING_SNAKE_CASE__ ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0}
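# For example, get_tokens("def add(a, b): return a + b") yields the set
# {"def", "add", "a", "b", "return"}: NON_ALPHA splits on every character that
# is not a letter, digit, or underscore, and empty fragments are dropped.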
class __lowercase :
"""simple docstring"""
def __init__(self , * , lowercase__ = 0.85 ):
snake_case_ : Tuple = duplication_jaccard_threshold
snake_case_ : Optional[Any] = NUM_PERM
snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
snake_case_ : List[Any] = defaultdict(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : int = self._index.query(lowercase__ )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(lowercase__ , lowercase__ )
if len(lowercase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = []
for base, duplicates in self._duplicate_clusters.items():
snake_case_ : Optional[Any] = [base] + list(lowercase__ )
# reformat the cluster to be a list of dict
snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowercase__ )
return duplicate_clusters
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.get_duplicate_clusters()
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ , snake_case_ : str = element
snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ):
"""simple docstring"""
snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ):
di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Returns a list of clusters; each cluster is a list of dicts with keys base_index, repo_name, and path.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
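# Worked example: jaccard_similarity("a b c", "b c d") == 2 / 4 == 0.5,
# since the token sets share {"b", "c"} out of a union of four tokens.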
a_ = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
for elementa in cluster:
snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
snake_case_ : Union[str, Any] = 1
extremes.append(SCREAMING_SNAKE_CASE__ )
return extremes
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
global _shared_dataset
snake_case_ : str = dataset
snake_case_ : int = []
snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ):
extremes_list.append(SCREAMING_SNAKE_CASE__ )
return extremes_list
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ):
"""simple docstring"""
snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
snake_case_ : str = {}
snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extremes in extremes_clusters:
for element in extremes:
snake_case_ : int = element
snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
snake_case_ : List[Any] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
return ds_filter, duplicate_clusters
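# A minimal usage sketch for the deduplication entry point defined just above
# (names in this file are placeholder-mangled; `deduplicate` below stands in
# for it, and the dataset id is hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset("my-org/my-code-dataset", split="train")
#   ds_filtered, clusters = deduplicate(ds, jaccard_threshold=0.85)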
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=18 , lowercase__=30 , lowercase__=4_00 , lowercase__=True , lowercase__=None , lowercase__=True , ):
snake_case_ : List[str] = size if size is not None else {"""height""": 18, """width""": 18}
snake_case_ : Union[str, Any] = parent
snake_case_ : Any = batch_size
snake_case_ : List[Any] = num_channels
snake_case_ : Dict = image_size
snake_case_ : List[Any] = min_resolution
snake_case_ : Optional[int] = max_resolution
snake_case_ : int = do_resize
snake_case_ : Dict = size
snake_case_ : Optional[Any] = apply_ocr
def __UpperCamelCase (self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = LayoutLMvaImageProcessingTester(self )
@property
def __UpperCamelCase (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase__ , """size""" ) )
self.assertTrue(hasattr(lowercase__ , """apply_ocr""" ) )
def __UpperCamelCase (self ):
snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __UpperCamelCase (self ):
pass
def __UpperCamelCase (self ):
# Initialize image_processing
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , Image.Image )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , lowercase__ )
self.assertIsInstance(encoding.boxes , lowercase__ )
# Test batched
snake_case_ : int = image_processing(lowercase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase (self ):
# Initialize image_processing
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , np.ndarray )
# Test not batched input
snake_case_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[int] = image_processing(lowercase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase (self ):
# Initialize image_processing
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , torch.Tensor )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case_ : str = image_processing(lowercase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase (self ):
# with apply_ocr = True
snake_case_ : List[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case_ : Optional[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case_ : int = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case_ : Optional[Any] = image_processing(lowercase__ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case_ : List[Any] = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], 
[6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowercase__ )
self.assertListEqual(encoding.boxes , lowercase__ )
# with apply_ocr = False
snake_case_ : str = LayoutLMvaImageProcessor(apply_ocr=lowercase__ )
snake_case_ : Any = image_processing(lowercase__ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
a_ = logging.getLogger(__name__)
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
a_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
a_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
a_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
a_ = [0] * args.vocab_size
for k, v in counter.items():
a_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
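# Example invocation, using the default paths from the arguments above
# (the script filename is hypothetical):
#
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522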
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
a_ = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
a_ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Tuple = VOCAB_FILES_NAMES
_A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : Union[str, Any] = ["""input_ids""", """attention_mask"""]
_A : Optional[int] = NllbTokenizer
_A : List[int] = []
_A : List[int] = []
def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=False , **lowercase__ , ):
# Mask token behaves like a normal word, i.e. it includes the space before it
snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
snake_case_ : str = legacy_behaviour
super().__init__(
vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , legacy_behaviour=lowercase__ , **lowercase__ , )
snake_case_ : str = vocab_file
snake_case_ : str = False if not self.vocab_file else True
snake_case_ : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case_ : List[Any] = {
lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ : Optional[Any] = src_lang if src_lang is not None else """eng_Latn"""
snake_case_ : Dict = self.convert_tokens_to_ids(self._src_lang )
snake_case_ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase (self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : int = [self.sep_token_id]
snake_case_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ : List[Any] = src_lang
snake_case_ : str = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
snake_case_ : str = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : List[str] = tgt_lang_id
return inputs
def __UpperCamelCase (self , lowercase__ , lowercase__ = "eng_Latn" , lowercase__ = None , lowercase__ = "fra_Latn" , **lowercase__ , ):
snake_case_ : List[Any] = src_lang
snake_case_ : int = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def __UpperCamelCase (self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase (self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
if self.legacy_behaviour:
snake_case_ : Dict = []
snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ : Optional[int] = [self.cur_lang_code]
snake_case_ : List[str] = [self.eos_token_id]
snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : str = self.convert_tokens_to_ids(lowercase__ )
if self.legacy_behaviour:
snake_case_ : Tuple = []
snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ : Any = [self.cur_lang_code]
snake_case_ : Union[str, Any] = [self.eos_token_id]
snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : Any = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
snake_case_ : str = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
copyfile(self.vocab_file , lowercase__ )
return (out_vocab_file,)
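# A minimal usage sketch for this fast tokenizer, assuming the checkpoint
# referenced above is available; the source language code is prepended via
# set_src_lang_special_tokens:
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
#   batch = tok("Hello world", return_tensors="pt")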
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path / """cache"""
snake_case_ : Optional[int] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : int = {"""text""": """string"""}
snake_case_ : Any = features.copy() if features else default_expected_features
snake_case_ : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Optional[Any] = {"""text""": """string"""}
snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("""path_type""" , [str, list] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[str] = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : str = [text_path]
snake_case_ : List[str] = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Tuple = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : int = features.copy() if features else default_expected_features
snake_case_ : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if split:
snake_case_ : Union[str, Any] = {split: text_path}
else:
snake_case_ : Union[str, Any] = """train"""
snake_case_ : int = {"""train""": text_path, """test""": text_path}
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : Tuple = {"""text""": """string"""}
snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
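# These parametrized readers can be exercised directly with pytest, e.g.
# (the test file path is hypothetical):
#
#   pytest tests/io/test_text.py -k "keep_in_memory"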
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
a_ = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Dict = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
snake_case_ : str = self.transformer_dir
shutil.copy(
os.path.join(lowercase__ , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ):
snake_case_ : Optional[Any] = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
snake_case_ : List[Any] = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
snake_case_ : str = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
snake_case_ : Optional[int] = black.format_str(lowercase__ , mode=lowercase__ )
snake_case_ : Any = os.path.join(self.transformer_dir , """new_code.py""" )
with open(lowercase__ , """w""" , newline="""\n""" ) as f:
f.write(lowercase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase__ )
with open(lowercase__ , """r""" ) as f:
self.assertTrue(f.read() , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , lowercase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , lowercase__ ) , )
# Copy consistency with a really long name
snake_case_ : List[Any] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , f'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , lowercase__ , lowercase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , lowercase__ , overwrite_result=re.sub("""Bert""" , """TestModel""" , lowercase__ ) , )
def __UpperCamelCase (self ):
snake_case_ : List[Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
snake_case_ : List[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
snake_case_ : Optional[int] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
snake_case_ : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
snake_case_ , snake_case_ : Any = check_copies.convert_to_localized_md(
lowercase__ , lowercase__ , localized_readme["""format_model_list"""] )
self.assertFalse(lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
snake_case_ , snake_case_ : Tuple = check_copies.convert_to_localized_md(
lowercase__ , lowercase__ , localized_readme["""format_model_list"""] )
# Check that the number of models in the converted list matches the original README.md.
self.assertTrue(lowercase__ )
snake_case_ : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
snake_case_ : Union[str, Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
snake_case_ : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
snake_case_ , snake_case_ : Dict = check_copies.convert_to_localized_md(
lowercase__ , lowercase__ , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(lowercase__ , lowercase__ )
"""simple docstring"""
from copy import deepcopy
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ = None , lowercase__ = None ):
if arr is None and size is not None:
snake_case_ : str = size
snake_case_ : Optional[Any] = [0] * size
elif arr is not None:
self.init(lowercase__ )
else:
raise ValueError("""Either arr or size must be specified""" )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[Any] = len(lowercase__ )
snake_case_ : int = deepcopy(lowercase__ )
for i in range(1 , self.size ):
snake_case_ : Optional[Any] = self.next_(lowercase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCamelCase (self ):
snake_case_ : Dict = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case_ : Optional[int] = self.next_(lowercase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index + (index & (-index))
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index - (index & (-index))
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case_ : Tuple = self.next_(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
self.add(lowercase__ , value - self.get(lowercase__ ) )
def __UpperCamelCase (self , lowercase__ ):
if right == 0:
return 0
snake_case_ : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case_ : Optional[int] = self.prev(lowercase__ )
return result
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
return self.prefix(lowercase__ ) - self.prefix(lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return self.query(lowercase__ , index + 1 )
def __UpperCamelCase (self , lowercase__ ):
value -= self.tree[0]
if value < 0:
return -1
snake_case_ : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
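# Intended semantics of the Fenwick (binary indexed) tree above, reading the
# placeholder-mangled names as their de-mangled targets (`FenwickTree` is a
# hypothetical name for the class):
#
#   f = FenwickTree([1, 2, 3, 4, 5])
#   f.prefix(3)    # sum of arr[0:3] -> 1 + 2 + 3 == 6
#   f.add(1, 10)   # point update   -> logical array becomes [1, 12, 3, 4, 5]
#   f.query(1, 4)  # half-open range sum -> 12 + 3 + 4 == 19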
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = ["""pixel_values"""]
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = None , lowercase__ = True , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Dict = size if size is not None else {"""shortest_edge""": 2_24}
snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 2_56, """width""": 2_56}
snake_case_ : Tuple = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : Dict = do_resize
snake_case_ : Optional[Any] = size
snake_case_ : Optional[Any] = resample
snake_case_ : List[Any] = do_rescale
snake_case_ : int = rescale_factor
snake_case_ : Dict = do_center_crop
snake_case_ : Union[str, Any] = crop_size
snake_case_ : str = do_flip_channel_order
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = PIL.Image.BILINEAR , lowercase__ = None , **lowercase__ , ):
snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
snake_case_ : int = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
snake_case_ : List[Any] = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
return flip_channel_order(lowercase__ , data_format=lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : List[str] = do_resize if do_resize is not None else self.do_resize
snake_case_ : Tuple = resample if resample is not None else self.resample
snake_case_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : Tuple = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
snake_case_ : Any = size if size is not None else self.size
snake_case_ : int = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : Optional[int] = crop_size if crop_size is not None else self.crop_size
snake_case_ : str = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
snake_case_ : Union[str, Any] = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
snake_case_ : Any = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
snake_case_ : str = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
snake_case_ : Tuple = [self.flip_channel_order(image=lowercase__ ) for image in images]
snake_case_ : Any = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : Tuple = {"""pixel_values""": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowercase__ ):
snake_case_ : str = target_sizes.numpy()
snake_case_ : int = []
for idx in range(len(lowercase__ ) ):
snake_case_ : List[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowercase__ )
snake_case_ : Optional[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase__ )
else:
snake_case_ : int = logits.argmax(dim=1 )
snake_case_ : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
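if __name__ == "__main__":
    # Hedged, self-contained sketch (added; not part of the processor above): its
    # preprocess path amounts to resize -> center crop -> rescale by 1/255 ->
    # RGB->BGR channel flip -> channels-first layout. A plain-numpy version of
    # the last three steps, using the module's `np` import:
    demo = np.zeros((2_56, 2_56, 3), dtype=np.uint8)
    rescaled = demo.astype(np.float32) * (1 / 2_55)  # do_rescale step
    bgr = rescaled[..., ::-1]  # do_flip_channel_order step (checkpoints expect BGR)
    chw = np.transpose(bgr, (2, 0, 1))  # ChannelDimension.FIRST
    assert chw.shape == (3, 2_56, 2_56)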
| 48
|
"""simple docstring"""
def binary_insertion_sort(collection: list):
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(binary_insertion_sort(unsorted))
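    # Illustrative self-check (added, not in the original): binary search finds
    # the insertion point in O(log i) comparisons, while the shift loop keeps the
    # overall worst case at O(n^2) element moves.
    assert binary_insertion_sort([5, 2, 4, 1]) == [1, 2, 4, 5]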
| 48
| 1
|
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : int = CpmAntTokenizer
_A : List[Any] = False
def __UpperCamelCase (self ):
super().setUp()
        vocab_tokens = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
snake_case_ : str = """今天天气真好!"""
snake_case_ : Dict = ["""今天""", """天气""", """真""", """好""", """!"""]
snake_case_ : Any = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : Dict = """今天天气真好!"""
snake_case_ : Union[str, Any] = [tokenizer.bos_token] + tokens
snake_case_ : Union[str, Any] = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 48
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = ["""image_processor""", """tokenizer"""]
_A : str = """ChineseCLIPImageProcessor"""
_A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ):
snake_case_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase__ , )
snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" )
snake_case_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = self.image_processor
def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if images is not None:
snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if text is not None and images is not None:
snake_case_ : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.tokenizer.model_input_names
snake_case_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCamelCase (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , )
return self.image_processor_class
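# Hedged usage sketch (added; the processor class name is an assumption, nothing
# below is executed):
#   processor = ChineseCLIPProcessor(image_processor=..., tokenizer=...)
#   batch = processor(text=["一只猫"], images=image, return_tensors="pt")
# Text runs through the tokenizer and images through the image processor; when
# both are given, the image `pixel_values` are attached onto the text encoding.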
| 48
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = ["""image_processor""", """feature_extractor"""]
_A : List[str] = """TvltImageProcessor"""
_A : Optional[Any] = """TvltFeatureExtractor"""
def __init__(self , lowercase__ , lowercase__ ):
super().__init__(image_processor=lowercase__ , feature_extractor=lowercase__ )
snake_case_ : str = image_processor
snake_case_ : Dict = feature_extractor
def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , *lowercase__ , **lowercase__ , ):
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
snake_case_ : Optional[Any] = None
if images is not None:
snake_case_ : str = self.image_processor(lowercase__ , mask_pixel=lowercase__ , *lowercase__ , **lowercase__ )
if images_mixed is not None:
snake_case_ : Union[str, Any] = self.image_processor(lowercase__ , is_mixed=lowercase__ , *lowercase__ , **lowercase__ )
if audio is not None:
snake_case_ : Tuple = self.feature_extractor(
lowercase__ , *lowercase__ , sampling_rate=lowercase__ , mask_audio=lowercase__ , **lowercase__ )
snake_case_ : int = {}
if audio is not None:
output_dict.update(lowercase__ )
if images is not None:
output_dict.update(lowercase__ )
if images_mixed_dict is not None:
output_dict.update(lowercase__ )
return output_dict
@property
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.image_processor.model_input_names
snake_case_ : Union[str, Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
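# Hedged usage sketch (added; processor name and inputs are assumptions):
#   processor = TvltProcessor(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
# The image and audio outputs are merged into one dict, so their keys must not
# collide; at least one of `images` or `audio` is required.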
| 48
|
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path: str):
    """simple docstring"""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
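# Illustrative note on the expected file format (an assumption inferred from the
# parsing above): each input line reads "node_a node_b distance", and the function
# returns a symmetric adjacency map whose distances stay strings, e.g.
#   {"a": [["b", "20"]], "b": [["a", "20"]]}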
def generate_first_solution(path: str, dict_of_neighbours: dict):
    """simple docstring"""
    with open(path) as f:
        start_node = f.read(1)
        end_node = start_node
        first_solution = []
        visiting = start_node
        distance_of_first_solution = 0
        while visiting not in first_solution:
            minim = 1_0_0_0_0
            for k in dict_of_neighbours[visiting]:
                if int(k[1]) < int(minim) and k[0] not in first_solution:
                    minim = k[1]
                    best_node = k[0]
            first_solution.append(visiting)
            distance_of_first_solution = distance_of_first_solution + int(minim)
            visiting = best_node
        first_solution.append(end_node)
        position = 0
        for k in dict_of_neighbours[first_solution[-2]]:
            if k[0] == start_node:
                break
            position += 1
        distance_of_first_solution = (
            distance_of_first_solution
            + int(dict_of_neighbours[first_solution[-2]][position][1])
            - 1_0_0_0_0
        )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution: list, dict_of_neighbours: dict):
    """simple docstring"""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
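# Clarifying note (derived from the code above): each neighbour is a copy of the
# current tour with one pair of interior nodes swapped, and its total distance is
# appended as the final list element, e.g. ["a", "c", "b", "a", 37]; the sort
# therefore orders candidate moves by cost, cheapest first.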
def tabu_search(first_solution: list, distance_of_first_solution: int, dict_of_neighbours: dict, iters: int, size: int):
    """simple docstring"""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    """simple docstring"""
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 48
| 1
|
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
return EnvironmentCommand()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@staticmethod
def __UpperCamelCase (lowercase__ ):
snake_case_ : str = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowercase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowercase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowercase__ )
def __init__(self , lowercase__ , *lowercase__ ):
snake_case_ : Any = accelerate_config_file
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = """not installed"""
if is_safetensors_available():
import safetensors
snake_case_ : str = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
snake_case_ : Union[str, Any] = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'
snake_case_ : Any = """not installed"""
snake_case_ : List[str] = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
snake_case_ : Optional[int] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowercase__ ):
snake_case_ : Optional[int] = load_config_from_file(self._accelerate_config_file ).to_dict()
snake_case_ : str = (
"""\n""".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(lowercase__ , lowercase__ )
else f'\t{accelerate_config}'
)
snake_case_ : List[str] = """not installed"""
snake_case_ : Dict = """NA"""
if is_torch_available():
import torch
snake_case_ : Optional[Any] = torch.__version__
snake_case_ : List[Any] = torch.cuda.is_available()
snake_case_ : Dict = """not installed"""
snake_case_ : Any = """NA"""
if is_tf_available():
import tensorflow as tf
snake_case_ : Dict = tf.__version__
try:
# deprecated in v2.1
snake_case_ : Any = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
snake_case_ : List[str] = bool(tf.config.list_physical_devices("""GPU""" ) )
snake_case_ : Optional[Any] = """not installed"""
snake_case_ : Union[str, Any] = """not installed"""
snake_case_ : Dict = """not installed"""
snake_case_ : Tuple = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
snake_case_ : Tuple = flax.__version__
snake_case_ : str = jax.__version__
snake_case_ : Optional[int] = jaxlib.__version__
snake_case_ : List[Any] = jax.lib.xla_bridge.get_backend().platform
snake_case_ : Any = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'{safetensors_version}',
"""Accelerate version""": f'{accelerate_version}',
"""Accelerate config""": f'{accelerate_config_str}',
"""PyTorch version (GPU?)""": f'{pt_version} ({pt_cuda_available})',
"""Tensorflow version (GPU?)""": f'{tf_version} ({tf_cuda_available})',
"""Flax version (CPU?/GPU?/TPU?)""": f'{flax_version} ({jax_backend})',
"""Jax version""": f'{jax_version}',
"""JaxLib version""": f'{jaxlib_version}',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowercase__ ) )
return info
@staticmethod
def __UpperCamelCase (lowercase__ ):
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
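# Hedged usage sketch (added; the sub-config dicts are assumptions -- only the two
# nested configs are mandatory, and each is revived via AutoConfig.for_model):
#   config = RagConfig(
#       question_encoder={"model_type": "dpr"},
#       generator={"model_type": "bart"},
#       n_docs=5,
#       max_combined_length=300,
#   )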
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """rag"""
_A : Optional[Any] = True
def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
snake_case_ : List[str] = kwargs.pop("""generator""" )
snake_case_ : List[str] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : int = reduce_loss
snake_case_ : Optional[int] = label_smoothing
snake_case_ : Dict = exclude_bos_score
snake_case_ : Union[str, Any] = do_marginalize
snake_case_ : Union[str, Any] = title_sep
snake_case_ : int = doc_sep
snake_case_ : int = n_docs
snake_case_ : List[str] = max_combined_length
snake_case_ : Tuple = dataset
snake_case_ : int = dataset_split
snake_case_ : str = index_name
snake_case_ : List[str] = retrieval_vector_size
snake_case_ : Dict = retrieval_batch_size
snake_case_ : str = passages_path
snake_case_ : Union[str, Any] = index_path
snake_case_ : Tuple = use_dummy_dataset
snake_case_ : Dict = output_retrieved
snake_case_ : str = do_deduplication
snake_case_ : Any = use_cache
if self.forced_eos_token_id is None:
snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )
@classmethod
def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.question_encoder.to_dict()
snake_case_ : Dict = self.generator.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
| 48
| 1
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=99 , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
snake_case_ : List[str] = parent
snake_case_ : int = batch_size
snake_case_ : int = seq_length
snake_case_ : Dict = is_training
snake_case_ : List[Any] = use_input_mask
snake_case_ : List[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : Optional[int] = vocab_size
snake_case_ : Dict = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Dict = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : Any = type_vocab_size
snake_case_ : List[Any] = type_sequence_label_size
snake_case_ : Optional[int] = initializer_range
snake_case_ : Union[str, Any] = num_labels
snake_case_ : List[str] = num_choices
snake_case_ : Optional[int] = scope
def __UpperCamelCase (self ):
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Any = None
if self.use_input_mask:
snake_case_ : str = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : str = None
if self.use_token_type_ids:
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Optional[int] = None
snake_case_ : List[str] = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase (self ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , use_stable_embedding=lowercase__ , )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Optional[int] = OpenLlamaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
snake_case_ : Dict = model(lowercase__ , attention_mask=lowercase__ )
snake_case_ : Optional[int] = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
snake_case_ : List[Any] = True
snake_case_ : Tuple = OpenLlamaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
snake_case_ : List[Any] = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )
snake_case_ : Dict = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , )
snake_case_ : Any = model(lowercase__ , attention_mask=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
snake_case_ : str = OpenLlamaForCausalLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
snake_case_ : Dict = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
snake_case_ : List[str] = True
snake_case_ : Dict = True
snake_case_ : int = OpenLlamaForCausalLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
# first forward pass
snake_case_ : Optional[Any] = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , use_cache=lowercase__ , )
snake_case_ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ : List[str] = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , output_hidden_states=lowercase__ , )["""hidden_states"""][0]
snake_case_ : List[Any] = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , past_key_values=lowercase__ , output_hidden_states=lowercase__ , )["""hidden_states"""][0]
# select random slice
snake_case_ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Any = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_A : Union[str, Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_A : List[str] = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Optional[int] = False
_A : Tuple = False
def __UpperCamelCase (self ):
snake_case_ : Tuple = OpenLlamaModelTester(self )
snake_case_ : Optional[int] = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
def __UpperCamelCase (self ):
self.config_tester.run_common_tests()
def __UpperCamelCase (self ):
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : List[str] = type
self.model_tester.create_and_check_model(*lowercase__ )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[Any] = 3
snake_case_ : Dict = input_dict["""input_ids"""]
snake_case_ : List[str] = input_ids.ne(1 ).to(lowercase__ )
snake_case_ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ : List[str] = OpenLlamaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
snake_case_ : List[str] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = 3
snake_case_ : str = """single_label_classification"""
snake_case_ : Optional[Any] = input_dict["""input_ids"""]
snake_case_ : Optional[int] = input_ids.ne(1 ).to(lowercase__ )
snake_case_ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ : Optional[Any] = OpenLlamaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
snake_case_ : Dict = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = 3
snake_case_ : Any = """multi_label_classification"""
snake_case_ : Optional[int] = input_dict["""input_ids"""]
snake_case_ : Tuple = input_ids.ne(1 ).to(lowercase__ )
snake_case_ : Union[str, Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ : Any = OpenLlamaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
snake_case_ : str = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def __UpperCamelCase (self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : List[Any] = ids_tensor([1, 10] , config.vocab_size )
snake_case_ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ : Tuple = OpenLlamaModel(lowercase__ )
original_model.to(lowercase__ )
original_model.eval()
snake_case_ : str = original_model(lowercase__ ).last_hidden_state
snake_case_ : Optional[Any] = original_model(lowercase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ : Union[str, Any] = {"""type""": scaling_type, """factor""": 10.0}
snake_case_ : int = OpenLlamaModel(lowercase__ )
scaled_model.to(lowercase__ )
scaled_model.eval()
snake_case_ : str = scaled_model(lowercase__ ).last_hidden_state
snake_case_ : Optional[Any] = scaled_model(lowercase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowercase__ , lowercase__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase__ , lowercase__ , atol=1e-5 ) )
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Tuple = backbone_config.get("""model_type""" )
snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
snake_case_ : List[Any] = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = initializer_range
snake_case_ : str = pool_scales
snake_case_ : Dict = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : List[str] = auxiliary_in_channels
snake_case_ : Optional[Any] = auxiliary_channels
snake_case_ : Any = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : List[str] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : Any = self.__class__.model_type
return output
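# Hedged usage sketch (added): `backbone_config` may be omitted (a default ResNet
# with out_features stage1..stage4 is used) or passed as a plain dict, e.g.
#   UperNetConfig(backbone_config={"model_type": "convnext"})
# in which case it is revived through CONFIG_MAPPING[model_type].from_dict(...).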
| 48
| 1
|
"""simple docstring"""
from copy import deepcopy
class BinaryIndexedTree:
    """simple docstring"""
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("""Either arr or size must be specified""")
    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_(index):
        return index + (index & (-index))
    @staticmethod
    def prev(index):
        return index - (index & (-index))
    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)
    def update(self, index, value):
        self.add(index, value - self.get(index))
    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result
    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)
    def get(self, index):
        return self.query(index, index + 1)
    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
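    # Hedged usage demo (added; assumes the class and method names above):
    fenwick = BinaryIndexedTree(arr=[1, 2, 3, 4])
    assert fenwick.prefix(3) == 6  # sum of arr[0:3]
    fenwick.add(1, 10)  # arr becomes [1, 12, 3, 4]
    assert fenwick.query(1, 3) == 15  # sum of arr[1:3]
    assert fenwick.get_array() == [1, 12, 3, 4]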
| 48
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__=-1 ):
# in NER datasets, the last column is usually reserved for NER label
snake_case_ : Union[str, Any] = label_idx
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[str] = mode.value
snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : Any = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
snake_case_ : str = []
snake_case_ : List[Any] = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
snake_case_ : Optional[Any] = []
snake_case_ : int = []
else:
snake_case_ : Optional[Any] = line.split(""" """ )
words.append(splits[0] )
if len(lowercase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : str = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(lowercase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(lowercase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Dict = f.read().splitlines()
if "O" not in labels:
snake_case_ : List[Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Any = f.read().splitlines()
if "O" not in labels:
snake_case_ : Tuple = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[Any] = mode.value
snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : str = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(lowercase__ ):
snake_case_ : Tuple = []
snake_case_ : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(lowercase__ ) == len(lowercase__ )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = 0
for sentence in parse_incr(lowercase__ ):
snake_case_ : int = preds_list[example_id]
snake_case_ : Dict = """"""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowercase__ )
example_id += 1
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 48
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __lowercase :
"""simple docstring"""
_A : int = BlenderbotSmallConfig
_A : int = {}
_A : List[str] = """gelu"""
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , ):
snake_case_ : Any = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Optional[int] = seq_length
snake_case_ : Optional[int] = is_training
snake_case_ : Tuple = use_labels
snake_case_ : Dict = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : int = num_hidden_layers
snake_case_ : Optional[int] = num_attention_heads
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : List[Any] = hidden_dropout_prob
snake_case_ : Union[str, Any] = attention_probs_dropout_prob
snake_case_ : Dict = max_position_embeddings
snake_case_ : List[Any] = eos_token_id
snake_case_ : List[Any] = pad_token_id
snake_case_ : str = bos_token_id
def __UpperCamelCase (self ):
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Dict = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case_ : int = prepare_blenderbot_small_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
return config, inputs_dict
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : Tuple = TFBlenderbotSmallModel(config=lowercase__ ).get_decoder()
snake_case_ : List[str] = inputs_dict["""input_ids"""]
snake_case_ : int = input_ids[:1, :]
snake_case_ : List[Any] = inputs_dict["""attention_mask"""][:1, :]
snake_case_ : Optional[Any] = inputs_dict["""head_mask"""]
snake_case_ : List[str] = 1
# first forward pass
snake_case_ : Dict = model(lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , use_cache=lowercase__ )
snake_case_ , snake_case_ : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case_ : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
snake_case_ : Dict = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case_ : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case_ : Optional[int] = model(lowercase__ , attention_mask=lowercase__ )[0]
snake_case_ : List[str] = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case_ : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case_ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
snake_case_ : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1e-3 )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , ):
"""simple docstring"""
if attention_mask is None:
        snake_case_ : Optional[int] = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
        snake_case_ : List[Any] = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
if head_mask is None:
snake_case_ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
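# Hedged note on the defaults above (added): `attention_mask` falls back to
# (input_ids != pad_token_id) cast to int8, and the decoder mask additionally
# forces position 0 to 1 so the decoder start token is never masked out.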
@require_tf
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Dict = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
_A : Any = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
_A : List[Any] = (
{
"""conversational""": TFBlenderbotSmallForConditionalGeneration,
"""feature-extraction""": TFBlenderbotSmallModel,
"""summarization""": TFBlenderbotSmallForConditionalGeneration,
"""text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
"""translation""": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A : int = True
_A : Optional[int] = False
_A : Dict = False
def __UpperCamelCase (self ):
snake_case_ : Any = TFBlenderbotSmallModelTester(self )
snake_case_ : Tuple = ConfigTester(self , config_class=lowercase__ )
def __UpperCamelCase (self ):
self.config_tester.run_common_tests()
def __UpperCamelCase (self ):
snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
@require_tokenizers
@require_tf
class __lowercase ( unittest.TestCase):
"""simple docstring"""
_A : Optional[Any] = [
"""Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
""" i'm going to throw up.\nand why is that?"""
]
_A : Optional[Any] = """facebook/blenderbot_small-90M"""
@cached_property
def __UpperCamelCase (self ):
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
@cached_property
def __UpperCamelCase (self ):
        snake_case_ : str = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
@slow
def __UpperCamelCase (self ):
snake_case_ : List[str] = self.tokenizer(self.src_text , return_tensors="""tf""" )
snake_case_ : List[str] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase__ , )
snake_case_ : Union[str, Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase__ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 48
|
"""simple docstring"""
import random
def rabin_miller(num: int):
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if num < 2:
return False
snake_case_ : Dict = [
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37,
41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89,
97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433,
439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593,
599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827,
829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ):
"""simple docstring"""
while True:
snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(SCREAMING_SNAKE_CASE__ ):
return num
if __name__ == "__main__":
a_ = generate_large_prime()
print(('''Prime number:''', a_))
print(('''is_prime_low_num:''', is_prime_low_num(a_)))
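# A minimal usage sketch (assuming is_prime_low_num resolves as called above):
# Miller-Rabin with 5 random bases is probabilistic, so a composite passes with
# probability at most (1/4) ** 5; the low_primes list short-circuits small inputs.
if __name__ == "__main__":
assert is_prime_low_num(997) # hits the low_primes fast path
assert not is_prime_low_num(1000) # divisible by 2, rejected by trial division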
| 48
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : str = word.split()
def justify(SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> str:
snake_case_ : Dict = max_width - width
snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) == 1:
# if there is only one word on the line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
snake_case_ : Tuple = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
snake_case_ : str = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
snake_case_ : Tuple = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(SCREAMING_SNAKE_CASE__ ):
num_spaces_between_words_list[i] += 1
snake_case_ : Union[str, Any] = []
for i in range(SCREAMING_SNAKE_CASE__ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = []
snake_case_ : list[str] = []
snake_case_ : Tuple = 0
for word in words:
if width + len(SCREAMING_SNAKE_CASE__ ) + len(SCREAMING_SNAKE_CASE__ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(SCREAMING_SNAKE_CASE__ )
width += len(SCREAMING_SNAKE_CASE__ )
else:
# justify the line and add it to result
answer.append(justify(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# reset new line and new width
snake_case_ , snake_case_ : List[Any] = [word], len(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = max_width - width - len(SCREAMING_SNAKE_CASE__ )
answer.append(""" """.join(SCREAMING_SNAKE_CASE__ ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
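# Worked example (a sketch; the expected output is traced through the logic
# above, assuming the justifier is exposed under its original name
# text_justification):
if __name__ == "__main__":
assert text_justification("This is an example of text justification.", 16) == [
"This    is    an",
"example  of text",
"justification.  ",
]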
| 48
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = """deberta-v2"""
def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Optional[int] = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = relative_attention
snake_case_ : Dict = max_relative_positions
snake_case_ : Optional[int] = pad_token_id
snake_case_ : List[str] = position_biased_input
# Backwards compatibility
if type(lowercase__ ) == str:
snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )]
snake_case_ : Optional[int] = pos_att_type
snake_case_ : List[str] = vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ )
snake_case_ : List[str] = pooler_dropout
snake_case_ : int = pooler_hidden_act
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@property
def __UpperCamelCase (self ):
if self.task == "multiple-choice":
snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : int = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def __UpperCamelCase (self ):
return 12
def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ):
snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
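# Usage sketch (hypothetical sizes): every __init__ argument above is a plain
# keyword, so a small config for experiments is just:
if __name__ == "__main__":
from transformers import DebertaV2Config # public name of the config class above
small_config = DebertaV2Config(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
assert small_config.model_type == "deberta-v2"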
| 48
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
a_ = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[Any] = VOCAB_FILES_NAMES
_A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[Any] = ["""input_ids""", """attention_mask"""]
_A : Optional[Any] = RobertaTokenizer
def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="replace" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=False , lowercase__=True , **lowercase__ , ):
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ , **lowercase__ , )
snake_case_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase__ ) != add_prefix_space:
snake_case_ : Optional[int] = getattr(lowercase__ , pre_tok_state.pop("""type""" ) )
snake_case_ : Union[str, Any] = add_prefix_space
snake_case_ : str = pre_tok_class(**lowercase__ )
snake_case_ : int = add_prefix_space
snake_case_ : Dict = """post_processor"""
snake_case_ : Dict = getattr(self.backend_tokenizer , lowercase__ , lowercase__ )
if tokenizer_component_instance:
snake_case_ : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ : Dict = tuple(state["""sep"""] )
if "cls" in state:
snake_case_ : Dict = tuple(state["""cls"""] )
snake_case_ : List[Any] = False
if state.get("""add_prefix_space""" , lowercase__ ) != add_prefix_space:
snake_case_ : str = add_prefix_space
snake_case_ : Optional[Any] = True
if state.get("""trim_offsets""" , lowercase__ ) != trim_offsets:
snake_case_ : Optional[int] = trim_offsets
snake_case_ : Optional[Any] = True
if changes_to_apply:
snake_case_ : Dict = getattr(lowercase__ , state.pop("""type""" ) )
snake_case_ : Any = component_class(**lowercase__ )
setattr(self.backend_tokenizer , lowercase__ , lowercase__ )
@property
def __UpperCamelCase (self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : List[Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else value
snake_case_ : Any = value
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
snake_case_ : List[Any] = kwargs.get("""is_split_into_words""" , lowercase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
snake_case_ : List[Any] = kwargs.get("""is_split_into_words""" , lowercase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : Any = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__=None ):
snake_case_ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : Dict = [self.sep_token_id]
snake_case_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
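# Usage sketch (downloads a checkpoint, so kept as comments): the assertions in
# _batch_encode_plus/_encode_plus above mean pretokenized input needs
# add_prefix_space=True, e.g.
# tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
# enc = tok(["Hello", "world"], is_split_into_words=True) # would raise otherwise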
| 48
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
# True iff the signs differ: the sign bit of num1 ^ num2 is set exactly when
# the operands' sign bits differ (the collided second operand is renamed numb)
return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
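# Why the XOR trick works (sketch): in two's complement the sign bit of
# num1 ^ num2 is set exactly when the operands' sign bits differ, so the
# expression is negative iff the signs are opposite, e.g.
if __name__ == "__main__":
assert (5 ^ -3) < 0 # opposite signs
assert not (5 ^ 3) < 0 # same signs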
| 48
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __lowercase :
"""simple docstring"""
_A : List[str] = PegasusConfig
_A : str = {}
_A : Dict = """gelu"""
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=40 , lowercase__=2 , lowercase__=1 , lowercase__=0 , ):
snake_case_ : Optional[Any] = parent
snake_case_ : str = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Optional[int] = use_labels
snake_case_ : Optional[Any] = vocab_size
snake_case_ : int = hidden_size
snake_case_ : Optional[int] = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : Optional[int] = intermediate_size
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : Any = eos_token_id
snake_case_ : List[Any] = pad_token_id
snake_case_ : Union[str, Any] = bos_token_id
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case_ : int = prepare_pegasus_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
return config, inputs_dict
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : Dict = TFPegasusModel(config=lowercase__ ).get_decoder()
snake_case_ : int = inputs_dict["""input_ids"""]
snake_case_ : Dict = input_ids[:1, :]
snake_case_ : Tuple = inputs_dict["""attention_mask"""][:1, :]
snake_case_ : Optional[Any] = inputs_dict["""head_mask"""]
snake_case_ : Dict = 1
# first forward pass
snake_case_ : Tuple = model(lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , use_cache=lowercase__ )
snake_case_ , snake_case_ : List[str] = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
snake_case_ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ : Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
snake_case_ : Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case_ : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case_ : Optional[int] = model(lowercase__ , attention_mask=lowercase__ )[0]
snake_case_ : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case_ : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case_ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
snake_case_ : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1e-3 )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Dict=None , ):
"""simple docstring"""
if attention_mask is None:
snake_case_ : Tuple = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case_ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case_ : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Union[str, Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
_A : Optional[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
_A : List[str] = (
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = True
_A : Optional[Any] = False
_A : Optional[int] = False
def __UpperCamelCase (self ):
snake_case_ : List[Any] = TFPegasusModelTester(self )
snake_case_ : Tuple = ConfigTester(self , config_class=lowercase__ )
def __UpperCamelCase (self ):
self.config_tester.run_common_tests()
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class __lowercase ( unittest.TestCase):
"""simple docstring"""
_A : Dict = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_A : Optional[Any] = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
_A : int = """google/pegasus-xsum"""
@cached_property
def __UpperCamelCase (self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __UpperCamelCase (self ):
snake_case_ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __UpperCamelCase (self , **lowercase__ ):
snake_case_ : Any = self.translate_src_text(**lowercase__ )
assert self.expected_text == generated_words
def __UpperCamelCase (self , **lowercase__ ):
snake_case_ : Dict = self.tokenizer(self.src_text , **lowercase__ , padding=lowercase__ , return_tensors="""tf""" )
snake_case_ : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase__ , )
snake_case_ : Union[str, Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase__ )
return generated_words
@slow
def __UpperCamelCase (self ):
self._assert_generated_batch_equal_expected()
| 48
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
a_ = '''>>zh<<'''
a_ = '''Helsinki-NLP/'''
if is_torch_available():
a_ = '''pt'''
elif is_tf_available():
a_ = '''tf'''
else:
a_ = '''jax'''
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : str = MarianTokenizer
_A : List[str] = False
_A : List[str] = True
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : Any = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase (self , **lowercase__ ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = """</s>"""
snake_case_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def __UpperCamelCase (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
snake_case_ : Tuple = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : List[str] = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __UpperCamelCase (self ):
# fmt: off
snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
snake_case_ : Dict = """Tämä on testi"""
snake_case_ : List[Any] = """This is a test"""
snake_case_ : Optional[int] = [76, 7, 20_47, 2]
snake_case_ : List[str] = [69, 12, 11, 9_40, 2]
snake_case_ : Any = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 48
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = """laion/clap-htsat-unfused"""
snake_case_ : Any = tempfile.mkdtemp()
def __UpperCamelCase (self , **lowercase__ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **lowercase__ )
def __UpperCamelCase (self , **lowercase__ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowercase__ )
def __UpperCamelCase (self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.get_tokenizer()
snake_case_ : Union[str, Any] = self.get_feature_extractor()
snake_case_ : List[str] = ClapProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Tuple = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case_ : List[str] = self.get_feature_extractor(do_normalize=lowercase__ , padding_value=1.0 )
snake_case_ : Tuple = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[str] = self.get_feature_extractor()
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : Union[str, Any] = ClapProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ )
snake_case_ : int = floats_list((3, 10_00) )
snake_case_ : List[str] = feature_extractor(lowercase__ , return_tensors="""np""" )
snake_case_ : List[str] = processor(audios=lowercase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase (self ):
snake_case_ : int = self.get_feature_extractor()
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : int = ClapProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ )
snake_case_ : int = """This is a test string"""
snake_case_ : Tuple = processor(text=lowercase__ )
snake_case_ : Optional[int] = tokenizer(lowercase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.get_feature_extractor()
snake_case_ : str = self.get_tokenizer()
snake_case_ : Any = ClapProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ )
snake_case_ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ : Optional[int] = processor.batch_decode(lowercase__ )
snake_case_ : Tuple = tokenizer.batch_decode(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.get_feature_extractor()
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : List[Any] = ClapProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 48
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_A : ClassVar[Features] = Features({"""audio""": Audio()})
_A : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_A : str = "audio"
_A : str = "transcription"
def __UpperCamelCase (self , lowercase__ ):
if self.audio_column not in features:
raise ValueError(f'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , lowercase__ ):
raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
snake_case_ : Optional[int] = copy.deepcopy(self )
snake_case_ : Tuple = self.input_schema.copy()
snake_case_ : List[str] = features[self.audio_column]
snake_case_ : Any = input_schema
return task_template
@property
def __UpperCamelCase (self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
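# Usage sketch: align_with_features above swaps in the dataset's own Audio
# feature (e.g. a different sampling_rate) while keeping the
# "audio" -> "transcription" mapping returned by column_mapping.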
| 48
| 1
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a_ = 6_3_7_8_1_3_7.0
a_ = 6_3_5_6_7_5_2.3_1_4_2_4_5
a_ = 6378137
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
"""simple docstring"""
snake_case_ : Dict = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
snake_case_ : Union[str, Any] = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
snake_case_ : List[Any] = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
snake_case_ : Union[str, Any] = haversine_distance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
# note: P and Q combine the *two* parametric latitudes; the original b_lat1 and
# b_lat2 names collided above, so distinct names are restored here
snake_case_ : Union[str, Any] = (b_lata + b_latb) / 2
snake_case_ : Optional[int] = (b_lata - b_latb) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
snake_case_ : int = (sin(SCREAMING_SNAKE_CASE__ ) ** 2) * (cos(SCREAMING_SNAKE_CASE__ ) ** 2)
snake_case_ : Optional[Any] = cos(sigma / 2 ) ** 2
snake_case_ : Optional[Any] = (sigma - sin(SCREAMING_SNAKE_CASE__ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
snake_case_ : Optional[int] = (cos(SCREAMING_SNAKE_CASE__ ) ** 2) * (sin(SCREAMING_SNAKE_CASE__ ) ** 2)
snake_case_ : List[str] = sin(sigma / 2 ) ** 2
snake_case_ : Optional[Any] = (sigma + sin(SCREAMING_SNAKE_CASE__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
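# Note (a sketch of the structure): Lambert's formula corrects the spherical
# (haversine) central angle sigma for the flattening f = (AXIS_A - AXIS_B) / AXIS_A
# via the X and Y terms, so for f = 0 it reduces to EQUATORIAL_RADIUS * sigma,
# the plain great-circle arc length.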
| 48
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = ["""pixel_values"""]
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : Union[str, Any] = do_resize
snake_case_ : List[str] = size
snake_case_ : str = crop_pct
snake_case_ : str = resample
snake_case_ : Optional[Any] = do_center_crop
snake_case_ : Dict = crop_size
snake_case_ : int = do_rescale
snake_case_ : Optional[int] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
snake_case_ : Dict = int(size["""height"""] / crop_pct )
else:
snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
else:
if "shortest_edge" in size:
snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
elif "height" in size and "width" in size:
snake_case_ : int = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
snake_case_ : int = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ : List[Any] = resample if resample is not None else self.resample
snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : int = image_std if image_std is not None else self.image_std
snake_case_ : List[Any] = size if size is not None else self.size
snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
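# Usage sketch (shapes follow the 224x224 defaults above): preprocess resizes
# with the crop_pct trick (resize to size / crop_pct, then center-crop),
# rescales by 1/255 and normalizes, so e.g.
# processor(images=image, return_tensors="np")["pixel_values"]
# has shape (batch, 3, 224, 224) under the default ChannelDimension.FIRST.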
| 48
| 1
|
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : list[str] | None = None ):
"""simple docstring"""
snake_case_ : List[Any] = word_bank or []
# create a table
snake_case_ : int = len(SCREAMING_SNAKE_CASE__ ) + 1
snake_case_ : list[list[list[str]]] = []
for _ in range(SCREAMING_SNAKE_CASE__ ):
table.append([] )
# seed value
snake_case_ : Union[str, Any] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(SCREAMING_SNAKE_CASE__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(SCREAMING_SNAKE_CASE__ )] == word:
snake_case_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(SCREAMING_SNAKE_CASE__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(SCREAMING_SNAKE_CASE__ )]:
combination.reverse()
return table[len(SCREAMING_SNAKE_CASE__ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
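# Trace sketch: table[i] stores every decomposition of target[:i]; for
# all_construct("aa", ["a"]) the table evolves to [[[]], [["a"]], [["a", "a"]]]
# and the function returns [["a", "a"]].
if __name__ == "__main__":
assert all_construct("aa", ["a"]) == [["a", "a"]]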
| 48
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = VOCAB_FILES_NAMES
_A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_A : str = ["""input_ids""", """attention_mask"""]
_A : Tuple = MBartTokenizer
_A : List[int] = []
_A : List[int] = []
def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
super().__init__(
vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
snake_case_ : Dict = vocab_file
snake_case_ : Optional[int] = False if not self.vocab_file else True
snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case_ : Any = {
lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX"""
snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
snake_case_ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase (self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : List[Any] = [self.sep_token_id]
snake_case_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ : int = src_lang
snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Union[str, Any] = tgt_lang_id
return inputs
def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
snake_case_ : List[str] = src_lang
snake_case_ : int = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def __UpperCamelCase (self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase (self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Tuple = []
snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Optional[int] = []
snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
snake_case_ : List[str] = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
copyfile(self.vocab_file , lowercase__ )
return (out_vocab_file,)
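# Usage sketch: per set_src_lang_special_tokens above the source template is
# "$A </s> <src_lang_code>" (empty prefix, [eos, lang code] suffix), so with
# src_lang = "en_XX" every encoded sentence ends with </s> followed by en_XX.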
| 48
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_A : ClassVar[Features] = Features({"""question""": Value("""string"""), """context""": Value("""string""")})
_A : ClassVar[Features] = Features(
{
"""answers""": Sequence(
{
"""text""": Value("""string"""),
"""answer_start""": Value("""int32"""),
})
})
_A : str = "question"
_A : str = "context"
_A : str = "answers"
@property
def __UpperCamelCase (self ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
"""simple docstring"""
import argparse
import hashlib  # hashlib is only used by the test function below
import struct
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ ):
snake_case_ : Union[str, Any] = data
        snake_case_ : List[str] = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
def __UpperCamelCase (lowercase__ , lowercase__ ):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
def __UpperCamelCase (self ):
snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def __UpperCamelCase (self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64
for i in range(16 , 80 ):
snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.padding()
snake_case_ : Any = self.split_blocks()
for block in self.blocks:
snake_case_ : Any = self.expand_block(lowercase__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    snake_case_ : Optional[Any] = (b & c) | ((~b) & d)
                    snake_case_ : List[str] = 0x5A827999
                elif 20 <= i < 40:
                    snake_case_ : Union[str, Any] = b ^ c ^ d
                    snake_case_ : Tuple = 0x6ED9EBA1
                elif 40 <= i < 60:
                    snake_case_ : str = (b & c) | (b & d) | (c & d)
                    snake_case_ : List[str] = 0x8F1BBCDC
                elif 60 <= i < 80:
                    snake_case_ : Tuple = b ^ c ^ d
                    snake_case_ : str = 0xCA62C1D6
                snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = (
                    self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(lowercase__ , 30 ),
                    c,
                    d,
                )
            snake_case_ : Any = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Union[str, Any] = b"""Test String"""
assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
snake_case_ : Optional[int] = parser.parse_args()
snake_case_ : Optional[int] = args.input_string
    # In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
snake_case_ : List[str] = f.read()
else:
snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" )
print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
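# --- added sketch: not part of the original file ---
# Standalone cross-checks with the standard library: hashlib gives the
# reference SHA-1 digest the test above compares against, and the 32-bit
# left-rotation below shows the high bit wrapping around to bit 0.
import hashlib as _hashlib

print(_hashlib.sha1(b"Test String").hexdigest())
_n, _b = 0x80000000, 1
print(((_n << _b) | (_n >> (32 - _b))) & 0xFFFFFFFF)  # 1: the high bit wraps to bit 0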
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[Any] = """data2vec-vision"""
def __init__(self , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=2_24 , lowercase__=16 , lowercase__=3 , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=0.1 , lowercase__=0.1 , lowercase__=True , lowercase__=[3, 5, 7, 11] , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Dict = hidden_size
snake_case_ : Optional[int] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : str = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : str = image_size
snake_case_ : str = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : List[str] = use_mask_token
snake_case_ : List[str] = use_absolute_position_embeddings
snake_case_ : List[str] = use_relative_position_bias
snake_case_ : Dict = use_shared_relative_position_bias
snake_case_ : int = layer_scale_init_value
snake_case_ : List[str] = drop_path_rate
snake_case_ : List[str] = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case_ : List[str] = out_indices
snake_case_ : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case_ : List[str] = use_auxiliary_head
snake_case_ : Optional[int] = auxiliary_loss_weight
snake_case_ : int = auxiliary_channels
snake_case_ : str = auxiliary_num_convs
snake_case_ : Dict = auxiliary_concat_input
snake_case_ : List[str] = semantic_loss_ignore_index
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = version.parse("""1.11""")
@property
def __UpperCamelCase (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase (self ):
return 1e-4
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=False ):
"""simple docstring"""
snake_case_ : str = """backbone.""" if is_semantic else """"""
snake_case_ : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', """beit.embeddings.cls_token"""),
(f'{prefix}patch_embed.proj.weight', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'{prefix}patch_embed.proj.bias', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'{prefix}pos_embed', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : int=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
snake_case_ : Union[str, Any] = """backbone.""" if is_semantic else """"""
# queries, keys and values
snake_case_ : Union[str, Any] = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
snake_case_ : str = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
snake_case_ : Tuple = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
snake_case_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
snake_case_ : List[Any] = q_bias
snake_case_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
snake_case_ : Dict = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
snake_case_ : List[Any] = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
snake_case_ : int = gamma_a
snake_case_ : Any = gamma_a
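# --- added sketch: not part of the original script ---
# Standalone illustration of the slicing above: a fused in_proj weight of
# shape (3 * hidden, hidden) is cut into query / key / value blocks along
# dimension 0. Assumes only PyTorch.
import torch as _torch

_hidden = 4
_w = _torch.arange(3 * _hidden * _hidden, dtype=_torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _w[:_hidden, :], _w[_hidden : 2 * _hidden, :], _w[-_hidden:, :]
print(_q.shape, _k.shape, _v.shape)  # three (4, 4) blocks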
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : str = dct.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = val
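# --- added sketch: not part of the original script ---
# The rename helper above is pop-and-reinsert: the tensor is removed under the
# old key and stored under the new one; every other entry is left untouched.
_sd = {"blocks.0.norm1.weight": "tensor"}
_sd["beit.encoder.layer.0.layernorm_before.weight"] = _sd.pop("blocks.0.norm1.weight")
print(_sd)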
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : int = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ):
"""simple docstring"""
snake_case_ : Optional[Any] = False if """rvlcdip""" in checkpoint_url else True
snake_case_ : Optional[int] = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE__ , use_mask_token=SCREAMING_SNAKE_CASE__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
snake_case_ : Any = 1_0_2_4
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : Any = 2_4
snake_case_ : Optional[int] = 1_6
# labels
if "rvlcdip" in checkpoint_url:
snake_case_ : List[Any] = 1_6
snake_case_ : Tuple = """huggingface/label-files"""
snake_case_ : Tuple = """rvlcdip-id2label.json"""
snake_case_ : List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ : str = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
snake_case_ : Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""model"""]
snake_case_ : int = create_rename_keys(SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
snake_case_ : List[Any] = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE__ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# Check outputs on an image
snake_case_ : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = prepare_img()
snake_case_ : Dict = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
snake_case_ : Optional[int] = encoding["""pixel_values"""]
snake_case_ : int = model(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = outputs.logits
# verify logits
snake_case_ : Any = [1, 1_6] if """rvlcdip""" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE__ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
if has_lm_head:
snake_case_ : int = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
snake_case_ : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
a_ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
if start < end:
snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = a[end]
snake_case_ : Dict = a[pivot]
snake_case_ : Any = temp
snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ )
return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 100  # 100 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
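# --- added sketch: not part of the original file ---
# A compact, self-contained Lomuto-style partition like the one above, also
# counting comparisons: elements smaller than the pivot are swapped into the
# left region, and the pivot ends at its final sorted position.
def _lomuto_partition(arr, lo, hi):
    comparisons = 0
    pivot = arr[hi]
    i = lo - 1
    for j in range(lo, hi):
        comparisons += 1
        if arr[j] < pivot:
            i += 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i + 1], arr[hi] = arr[hi], arr[i + 1]
    return i + 1, comparisons

_demo = [5, 2, 9, 1, 7]
print(_lomuto_partition(_demo, 0, len(_demo) - 1), _demo)  # (3, 4) [5, 2, 1, 7, 9]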
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[int | str] ):
"""simple docstring"""
create_state_space_tree(SCREAMING_SNAKE_CASE__ , [] , 0 , [0 for i in range(len(SCREAMING_SNAKE_CASE__ ) )] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[int | str] , SCREAMING_SNAKE_CASE__ : list[int | str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , ):
"""simple docstring"""
if index == len(SCREAMING_SNAKE_CASE__ ):
print(SCREAMING_SNAKE_CASE__ )
return
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case_ : List[Any] = True
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , SCREAMING_SNAKE_CASE__ )
current_sequence.pop()
snake_case_ : int = False
a_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
a_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
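# --- added sketch: not part of the original file ---
# Cross-check with the standard library: backtracking over n distinct items
# must visit exactly n! orderings, the same count itertools.permutations gives.
import itertools as _itertools

print(len(list(_itertools.permutations([3, 1, 2, 4]))))  # 24 == 4!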
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ):
"""simple docstring"""
snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )}
    # if the probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE__ )
    # if the probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes i, j, add an edge from i to j
    # if the randomly generated number is less than the given probability
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE__ )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE__ )
return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return {
i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
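# --- added sketch: not part of the original file ---
# Empirical check of the construction above, restated in a self-contained
# form: in an undirected G(n, p) graph, each of the n * (n - 1) / 2 pairs gets
# an edge with probability p, so the expected edge count is p * n * (n - 1) / 2.
import random as _random

def _sample_gnp(n: int, p: float) -> dict:
    g: dict = {i: [] for i in range(n)}
    for i in range(n):
        for j in range(i + 1, n):
            if _random.random() < p:  # one independent coin flip per pair
                g[i].append(j)
                g[j].append(i)
    return g

_n, _p = 100, 0.1
_g = _sample_gnp(_n, _p)
print(sum(len(_adj) for _adj in _g.values()) / 2, "edges; expected about", _p * _n * (_n - 1) / 2)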
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class __lowercase :
"""simple docstring"""
def __init__(self ):
snake_case_ : Tuple = {}
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[int] = {}
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
if nodea not in self.connections:
self.add_node(lowercase__ )
if nodea not in self.connections:
self.add_node(lowercase__ )
snake_case_ : Optional[Any] = probability
def __UpperCamelCase (self ):
return list(self.connections )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = 0
snake_case_ : List[str] = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : list[tuple[str, str, float]] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Union[str, Any] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Any = Counter(graph.get_nodes() )
snake_case_ : List[str] = start
for _ in range(SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[Any] = graph.transition(SCREAMING_SNAKE_CASE__ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
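# --- added sketch: not part of the original file ---
# A self-contained two-state run of the same idea as above: with transition
# probabilities a->b = 0.3 and b->a = 0.2, the stationary distribution is
# (0.4, 0.6), so long-run visit counts should approach a 2:3 ratio.
import random as _random
from collections import Counter as _Counter

_transitions = {"a": {"a": 0.7, "b": 0.3}, "b": {"a": 0.2, "b": 0.8}}
_visits = _Counter()
_node = "a"
for _ in range(10_000):
    _visits[_node] += 1
    _r, _acc = _random.random(), 0.0
    for _dest, _prob in _transitions[_node].items():
        _acc += _prob
        if _acc > _r:  # pick the first destination whose cumulative mass exceeds r
            _node = _dest
            break
print(_visits)  # roughly 4000 'a' visits and 6000 'b' visits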
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """dpr"""
def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : int = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : List[str] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : Union[str, Any] = projection_dim
snake_case_ : str = position_embedding_type
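# --- added sketch: not part of the original file ---
# Minimal usage, under the assumption that the class above mirrors
# transformers' DPRConfig: construct a config with a non-default
# projection_dim and read back a couple of fields.
from transformers import DPRConfig as _DPRConfig

_cfg = _DPRConfig(projection_dim=128)
print(_cfg.hidden_size, _cfg.projection_dim)  # 768 128 with the default hidden size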
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if "resnet-50" in model_name:
snake_case_ : Union[str, Any] = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
snake_case_ : Optional[int] = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
snake_case_ : List[str] = DetrConfig(use_timm_backbone=SCREAMING_SNAKE_CASE__ , backbone_config=SCREAMING_SNAKE_CASE__ )
# set label attributes
snake_case_ : Union[str, Any] = """panoptic""" in model_name
if is_panoptic:
snake_case_ : Tuple = 2_5_0
else:
snake_case_ : Optional[Any] = 9_1
snake_case_ : Union[str, Any] = """huggingface/label-files"""
snake_case_ : List[Any] = """coco-detection-id2label.json"""
snake_case_ : Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ : Tuple = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ : List[Any] = idalabel
snake_case_ : int = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Any = val
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=False ):
"""simple docstring"""
snake_case_ : Tuple = """"""
if is_panoptic:
snake_case_ : str = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case_ : List[Any] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
snake_case_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : List[Any] = in_proj_weight[:2_5_6, :]
snake_case_ : Dict = in_proj_bias[:2_5_6]
snake_case_ : List[Any] = in_proj_weight[2_5_6:5_1_2, :]
snake_case_ : Optional[int] = in_proj_bias[2_5_6:5_1_2]
snake_case_ : Dict = in_proj_weight[-2_5_6:, :]
snake_case_ : Dict = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
snake_case_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
snake_case_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Any = in_proj_weight[:2_5_6, :]
snake_case_ : Tuple = in_proj_bias[:2_5_6]
snake_case_ : str = in_proj_weight[2_5_6:5_1_2, :]
snake_case_ : Optional[int] = in_proj_bias[2_5_6:5_1_2]
snake_case_ : Optional[int] = in_proj_weight[-2_5_6:, :]
snake_case_ : str = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
snake_case_ : List[str] = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
snake_case_ : Any = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
snake_case_ : Dict = in_proj_weight_cross_attn[:2_5_6, :]
snake_case_ : int = in_proj_bias_cross_attn[:2_5_6]
snake_case_ : Dict = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
snake_case_ : Any = in_proj_bias_cross_attn[2_5_6:5_1_2]
snake_case_ : Tuple = in_proj_weight_cross_attn[-2_5_6:, :]
snake_case_ : int = in_proj_bias_cross_attn[-2_5_6:]
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : int = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=False ):
"""simple docstring"""
snake_case_ , snake_case_ : Any = get_detr_config(SCREAMING_SNAKE_CASE__ )
# load original model from torch hub
snake_case_ : str = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(f'Converting model {model_name}...' )
snake_case_ : List[str] = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=SCREAMING_SNAKE_CASE__ ).eval()
snake_case_ : Dict = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(SCREAMING_SNAKE_CASE__ ):
if is_panoptic:
snake_case_ : Union[str, Any] = """detr.""" + src
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , is_panoptic=SCREAMING_SNAKE_CASE__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ : Union[str, Any] = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
snake_case_ : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case_ : int = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Union[str, Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
snake_case_ : Any = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
snake_case_ : Dict = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = val
# finally, create HuggingFace model and load state dict
snake_case_ : Optional[Any] = DetrForSegmentation(SCREAMING_SNAKE_CASE__ ) if is_panoptic else DetrForObjectDetection(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# verify our conversion on an image
snake_case_ : int = """coco_panoptic""" if is_panoptic else """coco_detection"""
snake_case_ : List[Any] = DetrImageProcessor(format=SCREAMING_SNAKE_CASE__ )
snake_case_ : int = processor(images=prepare_img() , return_tensors="""pt""" )
snake_case_ : Optional[Any] = encoding["""pixel_values"""]
snake_case_ : Optional[Any] = detr(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(f'nielsr/{model_name}' )
processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
a_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a_ = 10
a_ = 256
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS:
return None
snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ )
for token in set(SCREAMING_SNAKE_CASE__ ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0}
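# --- added sketch: not part of the original file ---
# Standalone illustration with the `datasketch` package already imported
# above: two MinHash signatures over small token sets give a fast estimate of
# their Jaccard similarity.
_ma, _mb = MinHash(num_perm=256), MinHash(num_perm=256)
for _t in {"def", "foo", "return"}:
    _ma.update(_t.encode())
for _t in {"def", "bar", "return"}:
    _mb.update(_t.encode())
print(_ma.jaccard(_mb))  # estimate near the exact value 2 / 4 = 0.5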
class __lowercase :
"""simple docstring"""
    def __init__(self , *, lowercase__ = 0.85 ):
snake_case_ : Tuple = duplication_jaccard_threshold
snake_case_ : Optional[Any] = NUM_PERM
snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
snake_case_ : List[Any] = defaultdict(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : int = self._index.query(lowercase__ )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(lowercase__ , lowercase__ )
if len(lowercase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = []
for base, duplicates in self._duplicate_clusters.items():
snake_case_ : Optional[Any] = [base] + list(lowercase__ )
# reformat the cluster to be a list of dict
snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowercase__ )
return duplicate_clusters
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.get_duplicate_clusters()
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ , snake_case_ : str = element
snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ):
"""simple docstring"""
snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ):
di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
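# --- added sketch: not part of the original file ---
# Exact Jaccard similarity on the same token sets as the MinHash sketch above:
# |intersection| / |union| = 2 / 4 = 0.5.
_ta, _tb = {"def", "foo", "return"}, {"def", "bar", "return"}
print(len(_ta & _tb) / len(_ta | _tb))  # 0.5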
a_ = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
for elementa in cluster:
snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
snake_case_ : Union[str, Any] = 1
extremes.append(SCREAMING_SNAKE_CASE__ )
return extremes
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
global _shared_dataset
snake_case_ : str = dataset
snake_case_ : int = []
snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ):
extremes_list.append(SCREAMING_SNAKE_CASE__ )
return extremes_list
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ):
"""simple docstring"""
snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
snake_case_ : str = {}
snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extremes in extremes_clusters:
for element in extremes:
snake_case_ : int = element
snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
snake_case_ : List[Any] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
return ds_filter, duplicate_clusters
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained(
SCREAMING_SNAKE_CASE__ , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
snake_case_ : Optional[Any] = torch.load(hf_hub_download(repo_id=SCREAMING_SNAKE_CASE__ , filename="""pytorch_model.bin""" ) )
snake_case_ : int = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
snake_case_ : List[Any] = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
snake_case_ : Dict = tensor_value
snake_case_ : Tuple = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ , state_dict=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
# convert tokenizer
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
    help='''Path to the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a_ = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
a_ = logging.getLogger(__name__)
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
a_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
a_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
a_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
a_ = [0] * args.vocab_size
for k, v in counter.items():
a_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = """umt5"""
_A : Any = ["""past_key_values"""]
def __init__(self , lowercase__=25_01_12 , lowercase__=5_12 , lowercase__=64 , lowercase__=10_24 , lowercase__=8 , lowercase__=None , lowercase__=6 , lowercase__=32 , lowercase__=1_28 , lowercase__=0.1 , lowercase__=1e-6 , lowercase__=1.0 , lowercase__="gated-gelu" , lowercase__=True , lowercase__=True , lowercase__="T5Tokenizer" , lowercase__=True , lowercase__=0 , lowercase__=1 , lowercase__=0 , **lowercase__ , ):
super().__init__(
is_encoder_decoder=lowercase__ , tokenizer_class=lowercase__ , tie_word_embeddings=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , **lowercase__ , )
snake_case_ : List[Any] = vocab_size
snake_case_ : Dict = d_model
snake_case_ : str = d_kv
snake_case_ : Optional[Any] = d_ff
snake_case_ : List[Any] = num_layers
snake_case_ : Dict = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
snake_case_ : List[Any] = num_heads
snake_case_ : List[str] = relative_attention_num_buckets
snake_case_ : Optional[Any] = relative_attention_max_distance
snake_case_ : Tuple = dropout_rate
snake_case_ : Optional[int] = layer_norm_epsilon
snake_case_ : List[str] = initializer_factor
snake_case_ : List[str] = feed_forward_proj
snake_case_ : List[str] = use_cache
snake_case_ : Optional[int] = self.feed_forward_proj.split("""-""" )
snake_case_ : List[Any] = act_info[-1]
snake_case_ : int = act_info[0] == """gated"""
if len(lowercase__ ) > 1 and act_info[0] != "gated" or len(lowercase__ ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
snake_case_ : str = """gelu_new"""
@property
def __UpperCamelCase (self ):
return self.d_model
@property
def __UpperCamelCase (self ):
return self.num_heads
@property
def __UpperCamelCase (self ):
return self.num_layers
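# --- added sketch: not part of the original file ---
# Standalone illustration of the feed_forward_proj parsing in __init__ above:
# "gated-gelu" splits into a gating flag plus the activation name, while a
# bare "relu" yields no gating.
for _proj in ("gated-gelu", "relu"):
    _info = _proj.split("-")
    print(_proj, "->", "gated:", _info[0] == "gated", "act:", _info[-1])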
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __UpperCamelCase (self ):
snake_case_ : Any = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
snake_case_ : Optional[int] = """past_encoder_sequence + sequence"""
snake_case_ : Dict = {0: """batch"""}
snake_case_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
snake_case_ : Tuple = {0: """batch""", 1: """decoder_sequence"""}
snake_case_ : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ , direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __UpperCamelCase (self ):
return 13
@property
def __UpperCamelCase (self ):
return 5e-4
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path / """cache"""
snake_case_ : Optional[int] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : int = {"""text""": """string"""}
snake_case_ : Any = features.copy() if features else default_expected_features
snake_case_ : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Optional[Any] = {"""text""": """string"""}
snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("""path_type""" , [str, list] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[str] = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : str = [text_path]
snake_case_ : List[str] = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Tuple = tmp_path / """cache"""
    # the text builder always yields a single "text" column; "string" is its default dtype
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : int = features.copy() if features else default_expected_features
snake_case_ : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if split:
snake_case_ : Union[str, Any] = {split: text_path}
else:
snake_case_ : Union[str, Any] = """train"""
snake_case_ : int = {"""train""": text_path, """test""": text_path}
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : Tuple = {"""text""": """string"""}
snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
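# A usage sketch of the reader exercised by these tests, assuming the public
# `datasets.load_dataset` entry point dispatches to TextDatasetReader for the
# "text" builder; the data file path is illustrative.
# from datasets import load_dataset
# ds = load_dataset("text", data_files={"train": "corpus.txt"})
# ds["train"].column_names  # ["text"] -- one row per line of the file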
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a_ = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, a_)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
snake_case_ : int = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(SCREAMING_SNAKE_CASE__ , id=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
if exitstatus == 5:
snake_case_ : List[Any] = 0
# Doctest custom flag to ignore output.
a_ = doctest.register_optionflag('''IGNORE_RESULT''')
a_ = doctest.OutputChecker
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowercase__ , lowercase__ , lowercase__ )
a_ = CustomOutputChecker
a_ = HfDoctestModule
a_ = HfDocTestParser
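# A sketch of how the IGNORE_RESULT flag registered above is meant to be used:
# mark a doctest line whose output is nondeterministic, and the custom checker
# accepts whatever is printed. The function below is illustrative only.
def _random_id():
    """
    >>> _random_id()  # doctest: +IGNORE_RESULT
    '4bd1051b-...'
    """
    import uuid
    return str(uuid.uuid4())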
"""simple docstring"""
from copy import deepcopy
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ = None , lowercase__ = None ):
if arr is None and size is not None:
snake_case_ : str = size
snake_case_ : Optional[Any] = [0] * size
elif arr is not None:
self.init(lowercase__ )
else:
raise ValueError("""Either arr or size must be specified""" )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[Any] = len(lowercase__ )
snake_case_ : int = deepcopy(lowercase__ )
for i in range(1 , self.size ):
snake_case_ : Optional[Any] = self.next_(lowercase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCamelCase (self ):
snake_case_ : Dict = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case_ : Optional[int] = self.next_(lowercase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index + (index & (-index))
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index - (index & (-index))
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case_ : Tuple = self.next_(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
self.add(lowercase__ , value - self.get(lowercase__ ) )
def __UpperCamelCase (self , lowercase__ ):
if right == 0:
return 0
snake_case_ : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case_ : Optional[int] = self.prev(lowercase__ )
return result
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
return self.prefix(lowercase__ ) - self.prefix(lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return self.query(lowercase__ , index + 1 )
def __UpperCamelCase (self , lowercase__ ):
value -= self.tree[0]
if value < 0:
return -1
snake_case_ : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
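# A runnable sketch of the low-bit arithmetic the tree above relies on:
# `index & -index` isolates the lowest set bit, so the `next_` / `prev`
# helpers climb to the parent range and drop the trailing range, respectively.
def lowbit(index: int) -> int:
    return index & (-index)

assert [lowbit(i) for i in range(1, 9)] == [1, 2, 1, 4, 1, 2, 1, 8]
# a prefix-sum query therefore touches O(log n) tree nodes on its way down to 0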
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Any = """xlm-prophetnet"""
_A : Union[str, Any] = ["""past_key_values"""]
_A : Optional[int] = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
def __init__(self , lowercase__ = 0.1 , lowercase__ = "gelu" , lowercase__ = 3_05_22 , lowercase__ = 10_24 , lowercase__ = 40_96 , lowercase__ = 12 , lowercase__ = 16 , lowercase__ = 40_96 , lowercase__ = 12 , lowercase__ = 16 , lowercase__ = 0.1 , lowercase__ = 0.1 , lowercase__ = 5_12 , lowercase__ = 0.02 , lowercase__ = True , lowercase__ = True , lowercase__ = 0 , lowercase__ = 2 , lowercase__ = 32 , lowercase__ = 1_28 , lowercase__ = False , lowercase__ = 0.0 , lowercase__ = True , lowercase__ = 0 , lowercase__ = 1 , lowercase__ = 2 , **lowercase__ , ):
snake_case_ : Dict = vocab_size
snake_case_ : Optional[int] = hidden_size
snake_case_ : Tuple = encoder_ffn_dim
snake_case_ : Optional[Any] = num_encoder_layers
snake_case_ : Dict = num_encoder_attention_heads
snake_case_ : Dict = decoder_ffn_dim
snake_case_ : Union[str, Any] = num_decoder_layers
snake_case_ : Tuple = num_decoder_attention_heads
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : str = init_std # Normal(0, this parameter)
snake_case_ : Optional[Any] = activation_function
# parameters for xlmprophetnet
snake_case_ : Dict = ngram
snake_case_ : List[Any] = num_buckets
snake_case_ : Union[str, Any] = relative_max_distance
snake_case_ : Optional[int] = disable_ngram_loss
snake_case_ : Optional[int] = eps
# 3 Types of Dropout
snake_case_ : Optional[Any] = attention_dropout
snake_case_ : Optional[int] = activation_dropout
snake_case_ : Any = dropout
snake_case_ : Tuple = use_cache
super().__init__(
pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , add_cross_attention=lowercase__ , decoder_start_token_id=lowercase__ , **lowercase__ , )
@property
def __UpperCamelCase (self ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __UpperCamelCase (self , lowercase__ ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ):
"""simple docstring"""
snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
for i in range(1 , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Tuple = collection[i]
snake_case_ : Tuple = 0
snake_case_ : str = i - 1
while low <= high:
snake_case_ : Optional[int] = (low + high) // 2
if val < collection[mid]:
snake_case_ : List[str] = mid - 1
else:
snake_case_ : str = mid + 1
for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ):
snake_case_ : List[str] = collection[j - 1]
snake_case_ : Any = val
return collection
if __name__ == "__main__":
a_ = input('''Enter numbers separated by a comma:\n''').strip()
a_ = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
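# A runnable sketch of the insertion step above using the stdlib: the binary
# search with `val < collection[mid] -> high = mid - 1, else low = mid + 1`
# computes the same insertion point as bisect_right, so equal elements keep
# their relative order (the sort is stable).
from bisect import bisect_right

def binary_insertion_sort_sketch(values: list) -> list:
    out: list = []
    for v in values:
        out.insert(bisect_right(out, v), v)
    return out

assert binary_insertion_sort_sketch([5, 2, 4, 2, 1]) == [1, 2, 2, 4, 5]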
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
a_ = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
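# A runnable sketch of what the _LazyModule pattern above buys: importing the
# package is cheap, and a heavy submodule is resolved only on first attribute
# access. `lazy_attr` is an illustrative helper, not the transformers API.
import importlib

def lazy_attr(module_name: str, attr: str):
    def _load():
        # the real import happens here, on first use
        return getattr(importlib.import_module(module_name), attr)
    return _load

loads = lazy_attr("json", "loads")  # no json import is triggered by this line
assert loads()("[1, 2]") == [1, 2]  # resolved (and cached in sys.modules) on call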
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = ["""image_processor""", """tokenizer"""]
_A : str = """ChineseCLIPImageProcessor"""
_A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ):
snake_case_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase__ , )
snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" )
snake_case_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = self.image_processor
def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if images is not None:
snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if text is not None and images is not None:
snake_case_ : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.tokenizer.model_input_names
snake_case_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCamelCase (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , )
return self.image_processor_class
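# A usage sketch for the processor above, assuming the standard transformers
# entry point; the checkpoint id and image path are illustrative.
# from transformers import ChineseCLIPProcessor
# from PIL import Image
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text=["一张猫的照片"], images=Image.open("cat.png"), return_tensors="pt")
# # text-only or image-only calls are also allowed; passing neither raises ValueError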
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : List[str] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def __UpperCamelCase (self , lowercase__=0 ):
snake_case_ : List[str] = np.random.RandomState(lowercase__ )
snake_case_ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase (self ):
snake_case_ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Optional[Any] = self.get_dummy_inputs()
snake_case_ : Dict = pipe(**lowercase__ ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case_ : Union[str, Any] = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
snake_case_ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
snake_case_ : Union[str, Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Optional[Any] = self.get_dummy_inputs()
snake_case_ : str = pipe(**lowercase__ ).images
snake_case_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case_ : Tuple = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
snake_case_ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
snake_case_ : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : int = self.get_dummy_inputs()
snake_case_ : Any = pipe(**lowercase__ ).images
snake_case_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case_ : Dict = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
snake_case_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
snake_case_ : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Optional[Any] = self.get_dummy_inputs()
snake_case_ : Optional[Any] = pipe(**lowercase__ ).images
snake_case_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case_ : Tuple = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
snake_case_ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
snake_case_ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Union[str, Any] = self.get_dummy_inputs()
snake_case_ : Optional[Any] = pipe(**lowercase__ ).images
snake_case_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case_ : str = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
snake_case_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
snake_case_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : int = self.get_dummy_inputs()
snake_case_ : Optional[int] = pipe(**lowercase__ ).images
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case_ : Tuple = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
snake_case_ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Tuple = self.get_dummy_inputs()
snake_case_ : List[Any] = 3 * [inputs["""prompt"""]]
# forward
snake_case_ : Optional[Any] = pipe(**lowercase__ )
snake_case_ : Optional[Any] = output.images[0, -3:, -3:, -1]
snake_case_ : Optional[int] = self.get_dummy_inputs()
snake_case_ : Optional[int] = 3 * [inputs.pop("""prompt""" )]
snake_case_ : str = pipe.tokenizer(
lowercase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowercase__ , return_tensors="""np""" , )
snake_case_ : Any = text_inputs["""input_ids"""]
snake_case_ : Dict = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
snake_case_ : Optional[int] = prompt_embeds
# forward
snake_case_ : Optional[int] = pipe(**lowercase__ )
snake_case_ : int = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def __UpperCamelCase (self ):
snake_case_ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Tuple = self.get_dummy_inputs()
snake_case_ : List[Any] = 3 * ["""this is a negative prompt"""]
snake_case_ : Any = negative_prompt
snake_case_ : Dict = 3 * [inputs["""prompt"""]]
# forward
snake_case_ : Optional[int] = pipe(**lowercase__ )
snake_case_ : List[Any] = output.images[0, -3:, -3:, -1]
snake_case_ : Optional[int] = self.get_dummy_inputs()
snake_case_ : List[Any] = 3 * [inputs.pop("""prompt""" )]
snake_case_ : List[Any] = []
for p in [prompt, negative_prompt]:
snake_case_ : str = pipe.tokenizer(
lowercase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowercase__ , return_tensors="""np""" , )
snake_case_ : Any = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
snake_case_ , snake_case_ : str = embeds
# forward
snake_case_ : str = pipe(**lowercase__ )
snake_case_ : int = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowercase ( unittest.TestCase):
"""simple docstring"""
@property
def __UpperCamelCase (self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase (self ):
snake_case_ : str = ort.SessionOptions()
snake_case_ : Any = False
return options
def __UpperCamelCase (self ):
# using the PNDM scheduler by default
snake_case_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Any = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
snake_case_ : Dict = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
snake_case_ : List[Any] = output.images
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case_ : Dict = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase (self ):
snake_case_ : str = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
snake_case_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : List[Any] = """open neural network exchange"""
snake_case_ : str = np.random.RandomState(0 )
snake_case_ : Dict = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase__ , output_type="""np""" )
snake_case_ : int = output.images
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case_ : str = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
snake_case_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Dict = """open neural network exchange"""
snake_case_ : Any = np.random.RandomState(0 )
snake_case_ : str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase__ , output_type="""np""" )
snake_case_ : Union[str, Any] = output.images
snake_case_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case_ : Optional[int] = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase (self ):
snake_case_ : str = 0
def test_callback_fn(lowercase__ , lowercase__ , lowercase__ ) -> None:
snake_case_ : Optional[int] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
snake_case_ : str = latents[0, -3:, -3:, -1]
snake_case_ : str = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
snake_case_ : Union[str, Any] = latents[0, -3:, -3:, -1]
snake_case_ : Optional[Any] = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
snake_case_ : Union[str, Any] = False
snake_case_ : Any = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Optional[int] = """Andromeda galaxy in a bottle"""
snake_case_ : Union[str, Any] = np.random.RandomState(0 )
pipe(
prompt=lowercase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowercase__ , callback=lowercase__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(lowercase__ , lowercase__ )
assert pipe.safety_checker is None
snake_case_ : int = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase__ )
snake_case_ : int = OnnxStableDiffusionPipeline.from_pretrained(lowercase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
snake_case_ : Optional[Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
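# A usage sketch distilled from the tests above; the tiny hub checkpoint is the
# one the tests use, which keeps the pipeline cheap to run on CPU.
# from diffusers import OnnxStableDiffusionPipeline
# pipe = OnnxStableDiffusionPipeline.from_pretrained(
#     "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
# )
# image = pipe("A painting of a squirrel eating a burger", num_inference_steps=2).images[0]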
"""simple docstring"""
import argparse
import copy
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : List[Any] = {}
with open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case_ : int = []
_list.append([line.split()[1], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case_ : str = []
_list.append([line.split()[0], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ ) as f:
snake_case_ : Optional[Any] = f.read(1 )
snake_case_ : Union[str, Any] = start_node
snake_case_ : Dict = []
snake_case_ : Union[str, Any] = start_node
snake_case_ : Tuple = 0
while visiting not in first_solution:
snake_case_ : int = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution:
snake_case_ : Union[str, Any] = k[1]
snake_case_ : Any = k[0]
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = best_node
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case_ : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = []
for n in solution[1:-1]:
snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ )
for kn in solution[1:-1]:
snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ )
if n == kn:
continue
snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
snake_case_ : int = kn
snake_case_ : Dict = n
snake_case_ : Optional[int] = 0
for k in _tmp[:-1]:
snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case_ : Dict = distance + int(i[1] )
_tmp.append(SCREAMING_SNAKE_CASE__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
    # sort candidate tours by total distance, stored as the last element of each tour
    neighborhood_of_solution.sort(key=lambda x: x[-1])
return neighborhood_of_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
snake_case_ : Dict = 1
snake_case_ : List[Any] = first_solution
snake_case_ : List[Any] = []
snake_case_ : Optional[Any] = distance_of_first_solution
snake_case_ : Dict = solution
while count <= iters:
snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = 0
snake_case_ : List[Any] = neighborhood[index_of_best_solution]
snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1
snake_case_ : List[str] = False
while not found:
snake_case_ : Tuple = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
if best_solution[i] != solution[i]:
snake_case_ : Optional[Any] = best_solution[i]
snake_case_ : int = solution[i]
break
snake_case_ : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case_ : Tuple = True
snake_case_ : Dict = best_solution[:-1]
snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case_ : Tuple = cost
snake_case_ : Union[str, Any] = solution
else:
snake_case_ : str = index_of_best_solution + 1
snake_case_ : Tuple = neighborhood[index_of_best_solution]
if len(SCREAMING_SNAKE_CASE__ ) >= size:
tabu_list.pop(0 )
snake_case_ : List[str] = count + 1
return best_solution_ever, best_cost
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
"""simple docstring"""
snake_case_ : Tuple = generate_neighbours(args.File )
snake_case_ , snake_case_ : Optional[Any] = generate_first_solution(
args.File , SCREAMING_SNAKE_CASE__ )
snake_case_ , snake_case_ : Dict = tabu_search(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , )
print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
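# A sketch of the whitespace-separated edge list the parser above expects:
# one "node_a node_b distance" triple per line, and the first character of the
# file doubles as the start node. File and script names are illustrative.
sample_edges = """a b 20
a c 18
b c 10
"""
with open("tabu_sample.txt", "w") as f:
    f.write(sample_edges)
# python tabu_search.py -f tabu_sample.txt -i 3 -s 3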
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''vocab.json'''}
a_ = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
a_ = {'''mgp-str''': 27}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Tuple = VOCAB_FILES_NAMES
_A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self , lowercase__ , lowercase__="[GO]" , lowercase__="[GO]" , lowercase__="[s]" , lowercase__="[GO]" , **lowercase__ ):
super().__init__(
unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , **lowercase__ , )
with open(lowercase__ , encoding="""utf-8""" ) as vocab_handle:
snake_case_ : str = json.load(lowercase__ )
snake_case_ : Optional[int] = {v: k for k, v in self.vocab.items()}
@property
def __UpperCamelCase (self ):
return len(self.vocab )
def __UpperCamelCase (self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Any = []
for s in text:
char_tokens.extend(lowercase__ )
return char_tokens
def __UpperCamelCase (self , lowercase__ ):
return self.vocab.get(lowercase__ , self.vocab.get(self.unk_token ) )
def __UpperCamelCase (self , lowercase__ ):
return self.decoder.get(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if not os.path.isdir(lowercase__ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(lowercase__ ) )
return
snake_case_ : Optional[Any] = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase__ , ensure_ascii=lowercase__ ) + """\n""" )
return (vocab_file,)
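# A runnable sketch of the character-level tokenization above: each character
# maps through the vocab, and unknown characters fall back to the "[GO]"
# unk token. The tiny vocab below is illustrative, not the real mgp-str one.
vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3}
decoder = {v: k for k, v in vocab.items()}

def char_tokenize(text: str) -> list:
    return [vocab.get(ch, vocab["[GO]"]) for ch in text]

assert char_tokenize("ab?") == [2, 3, 0]
assert [decoder[i] for i in char_tokenize("ba")] == ["b", "a"]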
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index
            encapsulated by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`].
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """rag"""
_A : Optional[Any] = True
def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
snake_case_ : List[str] = kwargs.pop("""generator""" )
snake_case_ : List[str] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : int = reduce_loss
snake_case_ : Optional[int] = label_smoothing
snake_case_ : Dict = exclude_bos_score
snake_case_ : Union[str, Any] = do_marginalize
snake_case_ : Union[str, Any] = title_sep
snake_case_ : int = doc_sep
snake_case_ : int = n_docs
snake_case_ : List[str] = max_combined_length
snake_case_ : Tuple = dataset
snake_case_ : int = dataset_split
snake_case_ : str = index_name
snake_case_ : List[str] = retrieval_vector_size
snake_case_ : Dict = retrieval_batch_size
snake_case_ : str = passages_path
snake_case_ : Union[str, Any] = index_path
snake_case_ : Tuple = use_dummy_dataset
snake_case_ : Dict = output_retrieved
snake_case_ : str = do_deduplication
snake_case_ : Any = use_cache
if self.forced_eos_token_id is None:
snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )
@classmethod
def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.question_encoder.to_dict()
snake_case_ : Dict = self.generator.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
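# A usage sketch of the composite config above, assuming it mirrors
# transformers' RagConfig: it must be built from a question-encoder config and
# a generator config, e.g. via the classmethod defined above.
# from transformers import RagConfig, DPRConfig, BartConfig
# rag_config = RagConfig.from_question_encoder_generator_configs(
#     DPRConfig(), BartConfig(), n_docs=5, index_name="exact"
# )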
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a_ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __lowercase ( unittest.TestCase):
"""simple docstring"""
_A : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_A : Tuple = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_A : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_A : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __UpperCamelCase (self ):
snake_case_ : str = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
snake_case_ : Any = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
snake_case_ : Dict = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
snake_case_ : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
snake_case_ : Tuple = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
snake_case_ : Optional[Any] = text_classifier("""This is great !""" , return_all_scores=lowercase__ )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
snake_case_ : List[Any] = text_classifier("""This is great !""" , return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
snake_case_ : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
snake_case_ : Optional[int] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def __UpperCamelCase (self ):
import torch
snake_case_ : Optional[int] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
snake_case_ : Optional[Any] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def __UpperCamelCase (self ):
snake_case_ : str = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
snake_case_ : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def __UpperCamelCase (self ):
snake_case_ : Tuple = pipeline("""text-classification""" )
snake_case_ : str = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
snake_case_ : List[Any] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
snake_case_ : List[str] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def __UpperCamelCase (self ):
snake_case_ : int = pipeline("""text-classification""" , framework="""tf""" )
snake_case_ : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
snake_case_ : List[Any] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
snake_case_ : Optional[int] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : int = TextClassificationPipeline(model=lowercase__ , tokenizer=lowercase__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : Dict = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
snake_case_ : Union[str, Any] = """HuggingFace is in"""
snake_case_ : str = text_classifier(lowercase__ )
self.assertEqual(nested_simplify(lowercase__ ) , [{"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
snake_case_ : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
snake_case_ : List[str] = text_classifier(lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [{"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}, {"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
snake_case_ : List[Any] = text_classifier(lowercase__ , top_k=lowercase__ )
snake_case_ : int = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowercase__ ) , [[{"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}] * N, [{"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}] * N] , )
snake_case_ : List[str] = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
snake_case_ : Optional[int] = text_classifier(lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , {"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
        # This might be used as a text pair, but the tokenizer + pipeline interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead, as it was producing wrong outputs.
snake_case_ : Optional[int] = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(lowercase__ ):
text_classifier(lowercase__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
snake_case_ : Union[str, Any] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(lowercase__ ) , [{"""label""": ANY(lowercase__ ), """score""": ANY(lowercase__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
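# A usage sketch distilled from the tests above; the tiny checkpoint is the
# one the tests use, so the label names are generic LABEL_0 / LABEL_1.
# from transformers import pipeline
# clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
# clf("This is great !")            # [{"label": "LABEL_0", "score": ...}]
# clf("This is great !", top_k=2)   # scores for both labels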
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Tuple = backbone_config.get("""model_type""" )
snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
snake_case_ : List[Any] = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = initializer_range
snake_case_ : str = pool_scales
snake_case_ : Dict = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : List[str] = auxiliary_in_channels
snake_case_ : Optional[Any] = auxiliary_channels
snake_case_ : Any = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : List[str] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : Any = self.__class__.model_type
return output
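# A usage sketch, assuming this mirrors transformers' UperNetConfig: with no
# `backbone_config`, a default ResNet backbone (stages 1-4) is wired in, and
# `to_dict` above serializes the nested backbone config inline.
# from transformers import UperNetConfig
# config = UperNetConfig()
# config.backbone_config.model_type    # "resnet"
# config.to_dict()["backbone_config"]  # nested dict, plus "model_type": "upernet"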
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a_ = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__=-1 ):
        # in NER datasets, the last column is usually reserved for the NER label
snake_case_ : Union[str, Any] = label_idx
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[str] = mode.value
snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : Any = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
snake_case_ : str = []
snake_case_ : List[Any] = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
snake_case_ : Optional[Any] = []
snake_case_ : int = []
else:
snake_case_ : Optional[Any] = line.split(""" """ )
words.append(splits[0] )
if len(lowercase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
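

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original script): assumes a
    # CoNLL-formatted file at data/train.txt ("token label" per line).
    task = NER()
    examples = task.read_examples_from_file("data", Split.train)
    print(f"Read {len(examples)} examples; default labels start with {task.get_labels(None)[:3]}")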
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
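
    # Hedged sketch (not in the original script): downstream MLM training
    # (cf. DistilBERT's train.py) smooths these counts into masking
    # probabilities roughly as p_i ∝ max(count_i, 1) ** -alpha, alpha ≈ 0.7.
    import numpy as np

    alpha = 0.7
    token_probs = np.maximum(np.array(counts), 1) ** -alpha
    token_probs = token_probs / token_probs.sum()
    logger.info(f"Smoothed masking probability of token 0: {token_probs[0]:.3e}")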
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Deterministic check against the primes below 1000, then Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
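
    # Hedged demo (not in the original): a smaller keysize keeps the search fast.
    small_prime = generate_large_prime(keysize=32)
    assert is_prime_low_num(small_prime)
    print(("32-bit prime:", small_prime))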
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
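

# Hedged usage sketch (not part of the original script): a typical launch
# command; the SWAG data path and hyperparameter values are assumptions.
#
#   python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./swag_data \
#     --output_dir ./swag_out \
#     --max_seq_length 80 \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3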
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
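

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module); assumes an
    # installed `transformers` so the import resolves outside the package tree.
    from transformers import DebertaV2Config as InstalledConfig

    cfg = InstalledConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
    print(cfg.model_type, cfg.hidden_size)  # deberta-v2 128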
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of `plain_text` (same scheme as zlib.adler32)."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
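

if __name__ == "__main__":
    # Hedged demo (not in the original): for ASCII input this matches zlib's
    # reference Adler-32 implementation; 0x11E60398 == 300286872.
    import zlib

    assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 300286872
    print(adler32("Wikipedia"))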
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff `num1` and `num2` have opposite signs (their xor is negative)."""
    return num1 ^ num2 < 0
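

# Hedged examples (not in the original): the xor of two ints is negative
# exactly when their sign bits differ.
assert different_signs(1, -1) is True
assert different_signs(1, 1) is False
assert different_signs(-2, -7) is False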
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest_case = "[...]"  # unused placeholder kept from the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest_case = "[...]"  # unused placeholder kept from the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load the model weights
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
# fmt: off
snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = snake_case_  # readable alias for the expected-encoding dict above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n in ascending order (trial division)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
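
    # Hedged quick check (not in the original): 360 = 2**3 * 3**2 * 5.
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
    print(prime_factors(360))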
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
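

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module); assumes an
    # installed `datasets` release that still ships task templates.
    from datasets import Audio, Features, Value
    from datasets.tasks import AutomaticSpeechRecognition as InstalledASR

    feats = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
    template = InstalledASR().align_with_features(feats)
    print(template.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}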
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens (None if too short)."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in a dataset of {"content", "repo_name", "path"} rows."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity of two code snippets' token sets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per near-duplicate group inside a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
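

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): deduplicate a
    # dataset with "content", "repo_name" and "path" columns. The dataset name
    # below is an assumption for illustration.
    from datasets import load_dataset

    ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
    ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    print(f"Kept {len(ds_dedup)} of {len(ds)} files")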
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
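

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module); assumes an
    # installed `transformers` build that exports PoolFormerImageProcessor.
    from transformers import PoolFormerImageProcessor as InstalledProcessor

    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    batch = InstalledProcessor()(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)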
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a_ = True
except (ImportError, ModuleNotFoundError):
a_ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences with NLTK and rejoin them on newlines."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to the target-language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
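

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module); assumes an
    # installed `transformers` and network access to download the tokenizer.
    from transformers import MBartTokenizerFast as InstalledTok

    tok = InstalledTok.from_pretrained("facebook/mbart-large-en-ro")
    batch = tok("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
    # mBART appends </s> followed by the source language code (en_XX).
    print(tok.convert_ids_to_tokens(batch.input_ids[0][-2:]))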
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
a_ = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __lowercase ( datasets.Metric):
"""simple docstring"""
def __UpperCamelCase (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=False , ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
snake_case_ : Optional[int] = np.array([re.sub(lowercase__ , """""" , lowercase__ ) for x in predictions] )
snake_case_ : Dict = np.array([re.sub(lowercase__ , """""" , lowercase__ ) for x in references] )
else:
snake_case_ : Dict = np.asarray(lowercase__ )
snake_case_ : Optional[int] = np.asarray(lowercase__ )
if ignore_case:
snake_case_ : Optional[int] = np.char.lower(lowercase__ )
snake_case_ : int = np.char.lower(lowercase__ )
if ignore_punctuation:
snake_case_ : str = string.punctuation.maketrans("""""" , """""" , string.punctuation )
snake_case_ : Optional[int] = np.char.translate(lowercase__ , table=lowercase__ )
snake_case_ : Dict = np.char.translate(lowercase__ , table=lowercase__ )
if ignore_numbers:
snake_case_ : Union[str, Any] = string.digits.maketrans("""""" , """""" , string.digits )
snake_case_ : Union[str, Any] = np.char.translate(lowercase__ , table=lowercase__ )
snake_case_ : Optional[Any] = np.char.translate(lowercase__ , table=lowercase__ )
snake_case_ : Optional[int] = predictions == references
return {"exact_match": np.mean(lowercase__ ) * 1_00}
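# --- Illustration (not part of the metric class above) ---
# `simple_exact_match` is a hypothetical, self-contained sketch of the same
# normalization pipeline as `_compute`; the canonical entry point remains
# datasets.load_metric("exact_match").compute(...).
def simple_exact_match(predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
    def normalize(text):
        for pattern in regexes_to_ignore or []:
            text = re.sub(pattern, "", text)  # regexes are stripped first, before the other options
        if ignore_case:
            text = text.lower()
        if ignore_punctuation:
            text = text.translate(str.maketrans("", "", string.punctuation))
        if ignore_numbers:
            text = text.translate(str.maketrans("", "", string.digits))
        return text
    matches = [normalize(p) == normalize(r) for p, r in zip(predictions, references)]
    return 100.0 * sum(matches) / len(matches)
# e.g. simple_exact_match(["cat?", "theater"], ["the cat", "theater"]) == 50.0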
"""simple docstring"""
import argparse
import hashlib # hashlib is only used by the self-test below
import struct
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ ):
snake_case_ : Union[str, Any] = data
snake_case_ : List[str] = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
def __UpperCamelCase (lowercase__ , lowercase__ ):
return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
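# e.g. rotate(0x80000000, 1) == 0x00000001 -- a circular left shift on 32 bits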
def __UpperCamelCase (self ):
snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def __UpperCamelCase (self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64
for i in range(16 , 80 ):
snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.padding()
snake_case_ : Any = self.split_blocks()
for block in self.blocks:
snake_case_ : Any = self.expand_block(lowercase__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
snake_case_ : Optional[Any] = (b & c) | ((~b) & d)
snake_case_ : List[str] = 0x5A827999
elif 20 <= i < 40:
snake_case_ : Union[str, Any] = b ^ c ^ d
snake_case_ : Tuple = 0x6ED9EBA1
elif 40 <= i < 60:
snake_case_ : str = (b & c) | (b & d) | (c & d)
snake_case_ : List[str] = 0x8F1BBCDC
elif 60 <= i < 80:
snake_case_ : Tuple = b ^ c ^ d
snake_case_ : str = 0xCA62C1D6
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = (
self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
a,
self.rotate(lowercase__ , 30 ),
c,
d,
)
snake_case_ : Any = (
self.h[0] + a & 0xFFFFFFFF,
self.h[1] + b & 0xFFFFFFFF,
self.h[2] + c & 0xFFFFFFFF,
self.h[3] + d & 0xFFFFFFFF,
self.h[4] + e & 0xFFFFFFFF,
)
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Union[str, Any] = b"""Test String"""
assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.sha1(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
snake_case_ : Optional[int] = parser.parse_args()
snake_case_ : Optional[int] = args.input_string
# In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
snake_case_ : List[str] = f.read()
else:
snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" )
print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int | float | str , SCREAMING_SNAKE_CASE__ : int | float | str ):
"""simple docstring"""
if nth_term == "":
return [""]
snake_case_ : List[Any] = int(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = int(SCREAMING_SNAKE_CASE__ )
snake_case_ : list[str] = []
for temp in range(int(SCREAMING_SNAKE_CASE__ ) ):
series.append(f'1 / {pow(temp + 1 , int(SCREAMING_SNAKE_CASE__ ) )}' if series else """1""" )
return series
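# e.g. with nth_term=5 and power=2 this returns (the call site below refers to
# the function as `p_series`):
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']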
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = int(input('''Enter the last number (nth term) of the P-Series'''))
a_ = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
if start < end:
snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = a[end]
snake_case_ : Dict = a[pivot]
snake_case_ : Any = temp
snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ )
return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 100 # 100 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
'''No. of comparisons for 100 elements selected from a standard '''
'''normal distribution is:'''
)
print(z)
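# Minimal usage sketch on a plain list (assumption: the two helpers above are
# exposed as `_in_place_quick_sort` / `_in_place_partition`, as their call
# sites suggest):
# sample = [3, 1, 4, 1, 5]
# comparisons = _in_place_quick_sort(sample, 0, len(sample) - 1)
# `sample` is then sorted in place and `comparisons` counts element comparisons.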
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = '''▁'''
a_ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
a_ = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Tuple = VOCAB_FILES_NAMES
_A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_A : Any = ["""input_ids""", """attention_mask"""]
_A : List[int] = []
_A : List[int] = []
def __init__(self , lowercase__ , lowercase__=None , lowercase__=None , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__ = None , **lowercase__ , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : Optional[Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
snake_case_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case_ : Any = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowercase__ , tgt_lang=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , )
snake_case_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase__ ) )
snake_case_ : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case_ : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ : Dict = 1
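# e.g. the comma: spm id 3 + fairseq_offset (1) = fairseq id 4, matching the table above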
snake_case_ : Any = len(self.sp_model )
snake_case_ : str = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase__ )
}
snake_case_ : Any = {v: k for k, v in self.lang_code_to_id.items()}
snake_case_ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case_ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case_ : str = src_lang if src_lang is not None else """en_XX"""
snake_case_ : Optional[int] = self.lang_code_to_id[self._src_lang]
snake_case_ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase (self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __UpperCamelCase (self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__(self ):
snake_case_ : Tuple = self.__dict__.copy()
snake_case_ : Any = None
return state
def __setstate__(self , lowercase__ ):
snake_case_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ : Dict = {}
snake_case_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase (self ):
snake_case_ : Tuple = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCamelCase (self , lowercase__ ):
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ : List[Any] = self.sp_model.PieceToId(lowercase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCamelCase (self , lowercase__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : List[str] = []
snake_case_ : int = """"""
snake_case_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase__ ) + token
snake_case_ : Optional[Any] = True
snake_case_ : List[str] = []
else:
current_sub_tokens.append(lowercase__ )
snake_case_ : Optional[int] = False
out_string += self.sp_model.decode(lowercase__ )
return out_string.strip()
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if not os.path.isdir(lowercase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ : str = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , """wb""" ) as fi:
snake_case_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
snake_case_ : Dict = [1] * len(self.prefix_tokens )
snake_case_ : int = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ : Tuple = src_lang
snake_case_ : int = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
snake_case_ : str = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : str = tgt_lang_id
return inputs
def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
snake_case_ : Union[str, Any] = src_lang
snake_case_ : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def __UpperCamelCase (self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase (self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : str = self.lang_code_to_id[src_lang]
snake_case_ : str = [self.cur_lang_code_id]
snake_case_ : str = [self.eos_token_id]
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : List[str] = self.lang_code_to_id[tgt_lang]
snake_case_ : str = [self.cur_lang_code_id]
snake_case_ : str = [self.eos_token_id]
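# Net effect (per the mBART-50 convention): both source and target sequences
# are wrapped as "[lang_code] X </s>", e.g. "en_XX ... </s>" for src_lang="en_XX".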
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ):
"""simple docstring"""
snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE__ )
# if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes (i, j), add an edge from i to j
# if the randomly generated number is smaller than probability
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE__ )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE__ )
return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return {
i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
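# Minimal usage sketch (`complete_graph` follows the call site above;
# `random_graph` is a hypothetical name for the random generator):
# complete_graph(3)    -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}
# random_graph(3, 0.5) -> e.g. {0: [1], 1: [0, 2], 2: [1]} (edges are random)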
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
raise RuntimeError("""CUDA out of memory.""" )
class __lowercase ( nn.Module):
"""simple docstring"""
def __init__(self ):
super().__init__()
snake_case_ : Union[str, Any] = nn.Linear(3 , 4 )
snake_case_ : Union[str, Any] = nn.BatchNorm1d(4 )
snake_case_ : Optional[Any] = nn.Linear(4 , 5 )
def __UpperCamelCase (self , lowercase__ ):
return self.lineara(self.batchnorm(self.lineara(lowercase__ ) ) )
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Any = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowercase__ ):
nonlocal batch_sizes
batch_sizes.append(lowercase__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowercase__ , [1_28, 64, 32, 16, 8] )
def __UpperCamelCase (self ):
snake_case_ : List[str] = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowercase__ , lowercase__ ):
nonlocal batch_sizes
batch_sizes.append(lowercase__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
snake_case_ , snake_case_ : Tuple = mock_training_loop_function("""hello""" )
self.assertListEqual(lowercase__ , [1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def __UpperCamelCase (self ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowercase__ ):
pass
with self.assertRaises(lowercase__ ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def __UpperCamelCase (self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase__ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowercase__ ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def __UpperCamelCase (self ):
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowercase__ , lowercase__ , lowercase__ ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(lowercase__ ) as cm:
mock_training_loop_function(1_28 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def __UpperCamelCase (self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase__ ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(lowercase__ ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def __UpperCamelCase (self ):
snake_case_ : Tuple = torch.cuda.memory_allocated()
snake_case_ : int = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowercase__ )
snake_case_ : int = release_memory(lowercase__ )
self.assertEqual(torch.cuda.memory_allocated() , lowercase__ )
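# A sketch of the behaviour exercised above, using the public decorator from
# accelerate.utils.memory (`train` is a hypothetical function):
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...  # re-invoked with 128, 64, 32, ... after each CUDA OOM until it fits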
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """dpr"""
def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : int = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : List[str] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : Union[str, Any] = projection_dim
snake_case_ : str = position_embedding_type
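# Minimal usage sketch (assumption: this class is exported as `DPRConfig`):
# config = DPRConfig(projection_dim=128)  # projection_dim=0 (default) keeps the raw hidden_size output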
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Tuple = backbone_config.get("""model_type""" )
snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
snake_case_ : List[Any] = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = initializer_range
snake_case_ : str = pool_scales
snake_case_ : Dict = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : List[str] = auxiliary_in_channels
snake_case_ : Optional[Any] = auxiliary_channels
snake_case_ : Any = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : List[str] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : Any = self.__class__.model_type
return output
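# Minimal usage sketch (assumption: this class is exported as `UperNetConfig`):
# config = UperNetConfig()  # falls back to a default ResNet backbone config
# config.to_dict()["backbone_config"]["model_type"]  # -> "resnet"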
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a_ = 10
a_ = 256
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS:
return None
snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ )
for token in set(SCREAMING_SNAKE_CASE__ ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0}
class __lowercase :
"""simple docstring"""
def __init__(self , * , lowercase__ = 0.85 , ):
snake_case_ : Tuple = duplication_jaccard_threshold
snake_case_ : Optional[Any] = NUM_PERM
snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
snake_case_ : List[Any] = defaultdict(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : int = self._index.query(lowercase__ )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(lowercase__ , lowercase__ )
if len(lowercase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = []
for base, duplicates in self._duplicate_clusters.items():
snake_case_ : Optional[Any] = [base] + list(lowercase__ )
# reformat the cluster to be a list of dict
snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowercase__ )
return duplicate_clusters
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.get_duplicate_clusters()
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ , snake_case_ : str = element
snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ):
"""simple docstring"""
snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ):
di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
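# e.g. get_tokens("a b c") == {"a", "b", "c"}; against "b c d" the overlap is
# 2 tokens out of 4 in the union, so the similarity above evaluates to 0.5.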
a_ = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
for elementa in cluster:
snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
snake_case_ : Union[str, Any] = 1
extremes.append(SCREAMING_SNAKE_CASE__ )
return extremes
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
global _shared_dataset
snake_case_ : str = dataset
snake_case_ : int = []
snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ):
extremes_list.append(SCREAMING_SNAKE_CASE__ )
return extremes_list
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ):
"""simple docstring"""
snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
snake_case_ : str = {}
snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extremes in extremes_clusters:
for element in extremes:
snake_case_ : int = element
snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
snake_case_ : List[Any] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
return ds_filter, duplicate_clusters
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Dict = CLIPTokenizer
_A : Any = CLIPTokenizerFast
_A : List[Any] = True
_A : Optional[Any] = {}
_A : int = False
def __UpperCamelCase (self ):
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case_ : List[Any] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : Optional[Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
snake_case_ : Optional[int] = {"""unk_token""": """<unk>"""}
snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase__ ) )
def __UpperCamelCase (self , **lowercase__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , **lowercase__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = """lower newer"""
snake_case_ : str = """lower newer"""
return input_text, output_text
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : Optional[Any] = """lower newer"""
snake_case_ : Union[str, Any] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
snake_case_ : Any = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : Any = tokens + [tokenizer.unk_token]
snake_case_ : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
@require_ftfy
def __UpperCamelCase (self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : List[str] = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
snake_case_ : List[Any] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
snake_case_ : List[Any] = tokenizer_s.tokenize(lowercase__ )
snake_case_ : Optional[Any] = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
snake_case_ : str = """xa\u0303y""" + """ """ + """x\xe3y"""
snake_case_ : Dict = tokenizer_s.tokenize(lowercase__ )
snake_case_ : List[str] = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on unicode of space type
snake_case_ : Optional[Any] = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark)
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
snake_case_ : Tuple = tokenizer_s.tokenize(lowercase__ )
snake_case_ : List[Any] = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on unicode of line break type
snake_case_ : Dict = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
snake_case_ : Any = tokenizer_s.tokenize(lowercase__ )
snake_case_ : Optional[int] = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
# Verify that the offsets correctly take the argument `add_prefix_space` into account
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ : Dict = f'{text_of_1_token} {text_of_1_token}'
snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , )
snake_case_ : Union[str, Any] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
snake_case_ : Optional[int] = f' {text}'
snake_case_ : int = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , )
snake_case_ : List[Any] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
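# Concretely: for "hello hello" the two offsets are (0, 5) and (6, 11); with
# the leading space in " hello hello" they shift to (1, 6) and (7, 12).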
def __UpperCamelCase (self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(lowercase__ ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def __UpperCamelCase (self ):
super().test_tokenization_python_rust_equals()
def __UpperCamelCase (self ):
# CLIP always lowercases letters
pass
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
a_ = logging.getLogger(__name__)
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
a_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
a_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
a_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
a_ = [0] * args.vocab_size
for k, v in counter.items():
a_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : int = DebertaTokenizer
_A : Optional[int] = True
_A : Tuple = DebertaTokenizerFast
def __UpperCamelCase (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
snake_case_ : Optional[Any] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case_ : int = {"""unk_token""": """[UNK]"""}
snake_case_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase__ ) )
def __UpperCamelCase (self , **lowercase__ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Dict = """lower newer"""
snake_case_ : Tuple = """lower newer"""
return input_text, output_text
def __UpperCamelCase (self ):
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[Any] = """lower newer"""
snake_case_ : str = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case_ : Tuple = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : List[str] = tokens + [tokenizer.unk_token]
snake_case_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = self.get_tokenizer()
snake_case_ : Optional[int] = tokenizer("""Hello""" , """World""" )
snake_case_ : Dict = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , lowercase__ )
@slow
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
snake_case_ : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase__ )
snake_case_ : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase__ )
snake_case_ : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
snake_case_ : int = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
snake_case_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase__ )
snake_case_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __UpperCamelCase (self ):
snake_case_ : str = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
snake_case_ : str = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
snake_case_ : Optional[Any] = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
snake_case_ : Optional[Any] = tokenizer(lowercase__ , padding=lowercase__ )
snake_case_ : List[str] = [tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) for seq in encoding["""input_ids"""]]
# fmt: off
snake_case_ : Optional[Any] = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
snake_case_ : str = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , lowercase__ )
for expected, decoded in zip(lowercase__ , lowercase__ ):
self.assertEqual(lowercase__ , lowercase__ )
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path / """cache"""
snake_case_ : Optional[int] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : int = {"""text""": """string"""}
snake_case_ : Any = features.copy() if features else default_expected_features
snake_case_ : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Optional[Any] = {"""text""": """string"""}
snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("""path_type""" , [str, list] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[str] = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : str = [text_path]
snake_case_ : List[str] = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Tuple = tmp_path / """cache"""
    # the text builder always yields a single "text" column of dtype "string"
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : int = features.copy() if features else default_expected_features
snake_case_ : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if split:
snake_case_ : Union[str, Any] = {split: text_path}
else:
snake_case_ : Union[str, Any] = """train"""
snake_case_ : int = {"""train""": text_path, """test""": text_path}
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : Tuple = {"""text""": """string"""}
snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
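# --- Usage sketch mirroring the tests above (file name is hypothetical) ---
# The "text" builder reads a plain-text file into a single "text" column,
# one row per line; this is the behaviour the assertions above rely on.
import os
import tempfile
from datasets import load_dataset

with tempfile.TemporaryDirectory() as tmp_dir:
    txt_path = os.path.join(tmp_dir, "example.txt")
    with open(txt_path, "w", encoding="utf-8") as f:
        f.write("first line\nsecond line\n")
    ds = load_dataset("text", data_files=txt_path, split="train")
    assert ds.column_names == ["text"]
    assert ds.num_rows == 2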
| 48
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Tuple = """llama"""
_A : List[str] = ["""past_key_values"""]
def __init__(self , lowercase__=3_20_00 , lowercase__=40_96 , lowercase__=1_10_08 , lowercase__=32 , lowercase__=32 , lowercase__=None , lowercase__="silu" , lowercase__=20_48 , lowercase__=0.02 , lowercase__=1e-6 , lowercase__=True , lowercase__=0 , lowercase__=1 , lowercase__=2 , lowercase__=1 , lowercase__=False , lowercase__=None , **lowercase__ , ):
snake_case_ : Tuple = vocab_size
snake_case_ : Dict = max_position_embeddings
snake_case_ : List[Any] = hidden_size
snake_case_ : List[Any] = intermediate_size
snake_case_ : int = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
snake_case_ : int = num_attention_heads
snake_case_ : Tuple = num_key_value_heads
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Any = initializer_range
snake_case_ : Any = rms_norm_eps
snake_case_ : Tuple = pretraining_tp
snake_case_ : List[Any] = use_cache
snake_case_ : str = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , tie_word_embeddings=lowercase__ , **lowercase__ , )
def __UpperCamelCase (self ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'got {self.rope_scaling}' )
snake_case_ : int = self.rope_scaling.get("""type""" , lowercase__ )
snake_case_ : Optional[Any] = self.rope_scaling.get("""factor""" , lowercase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
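# --- Illustrative sketch of the `rope_scaling` contract validated above ---
# A standalone validator (not the class method itself): the dict needs a
# "type" of "linear" or "dynamic" and a float "factor" strictly above 1.0.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError("`rope_scaling` must be a dict with `type` and `factor`")
    if rope_scaling.get("type") not in ("linear", "dynamic"):
        raise ValueError("`type` must be 'linear' or 'dynamic'")
    factor = rope_scaling.get("factor")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError("`factor` must be a float > 1")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently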
| 48
|
"""simple docstring"""
from copy import deepcopy
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ = None , lowercase__ = None ):
if arr is None and size is not None:
snake_case_ : str = size
snake_case_ : Optional[Any] = [0] * size
elif arr is not None:
self.init(lowercase__ )
else:
raise ValueError("""Either arr or size must be specified""" )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[Any] = len(lowercase__ )
snake_case_ : int = deepcopy(lowercase__ )
for i in range(1 , self.size ):
snake_case_ : Optional[Any] = self.next_(lowercase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCamelCase (self ):
snake_case_ : Dict = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case_ : Optional[int] = self.next_(lowercase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index + (index & (-index))
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index - (index & (-index))
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case_ : Tuple = self.next_(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
self.add(lowercase__ , value - self.get(lowercase__ ) )
def __UpperCamelCase (self , lowercase__ ):
if right == 0:
return 0
snake_case_ : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case_ : Optional[int] = self.prev(lowercase__ )
return result
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
return self.prefix(lowercase__ ) - self.prefix(lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return self.query(lowercase__ , index + 1 )
def __UpperCamelCase (self , lowercase__ ):
value -= self.tree[0]
if value < 0:
return -1
        snake_case_ : Tuple = 1  # largest power of two strictly less than size
while j * 2 < self.size:
j *= 2
snake_case_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
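# --- Independent minimal sketch of the Fenwick-tree idea used above ---
# (standalone functions, not the class itself): tree[i] covers a block of
# size (i & -i), so point updates and prefix sums both touch O(log n) nodes.
def fenwick_add(tree, index, value):
    while index < len(tree):
        tree[index] += value
        index += index & (-index)  # climb to the next covering node

def fenwick_prefix(tree, right):
    # sum of the elements at positions 1..right (1-indexed)
    total = 0
    while right > 0:
        total += tree[right]
        right -= right & (-right)  # strip the lowest set bit
    return total

fenwick_tree = [0] * 6  # supports positions 1..5
fenwick_add(fenwick_tree, 2, 3)
fenwick_add(fenwick_tree, 4, 5)
assert fenwick_prefix(fenwick_tree, 4) == 3 + 5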
| 48
| 1