"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(*lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = {}
def a__ (self, lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = super().add_tokens(lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ )
if num_added_tokens == 0:
raise ValueError(
f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
' `placeholder_token` that is not already in the tokenizer.' )
def a__ (self, lowerCamelCase_, *lowerCamelCase_, lowerCamelCase_=1, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ )
output.append(lowerCamelCase_ )
else:
lowerCamelCase__ : List[str] = []
for i in range(lowerCamelCase_ ):
lowerCamelCase__ : int = placeholder_token + f'''_{i}'''
self.try_adding_tokens(lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ )
output.append(lowerCamelCase_ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f'''The tokenizer already has placeholder token {token} that can get confused with'''
f''' {placeholder_token}keep placeholder tokens independent''' )
lowerCamelCase__ : Optional[Any] = output
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=1.0 ):
'''simple docstring'''
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Dict = []
for i in range(len(lowerCamelCase_ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=lowerCamelCase_ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowerCamelCase__ : Optional[Any] = self.token_map[placeholder_token]
lowerCamelCase__ : Optional[int] = tokens[: 1 + int(len(lowerCamelCase_ ) * prop_tokens_to_load )]
if vector_shuffle:
lowerCamelCase__ : str = copy.copy(lowerCamelCase_ )
random.shuffle(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = text.replace(lowerCamelCase_, ' '.join(lowerCamelCase_ ) )
return text
def __call__(self, lowerCamelCase_, *lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=1.0, **lowerCamelCase_ ):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowerCamelCase_, vector_shuffle=lowerCamelCase_, prop_tokens_to_load=lowerCamelCase_ ), *lowerCamelCase_, **lowerCamelCase_, )
def a__ (self, lowerCamelCase_, *lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=1.0, **lowerCamelCase_ ):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
lowerCamelCase_, vector_shuffle=lowerCamelCase_, prop_tokens_to_load=lowerCamelCase_ ), *lowerCamelCase_, **lowerCamelCase_, )
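
# A minimal usage sketch (not part of the original file; the checkpoint name and
# placeholder token below are illustrative):
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)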
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = len(lowerCamelCase_ )
lowerCamelCase__ : Any = [0] * len_array
if len_array > 0:
lowerCamelCase__ : Union[str, Any] = array[0]
for i in range(1, lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = self.prefix_sum[i - 1] + array[i]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowerCamelCase_ )
return False
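
# Usage sketch (illustrative values):
#
#   ps = PrefixSum([1, 2, 3, 4])
#   assert ps.get_sum(0, 3) == 10   # 1 + 2 + 3 + 4
#   assert ps.get_sum(1, 2) == 5    # 2 + 3
#   assert ps.contains_sum(7)       # 3 + 4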
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['image_processor', 'tokenizer']
lowerCamelCase__ : Optional[int] = 'CLIPImageProcessor'
lowerCamelCase__ : List[str] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : int = kwargs.pop('feature_extractor' )
lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase__ : Any = self.tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if images is not None:
lowerCamelCase__ : List[Any] = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if text is not None and images is not None:
lowerCamelCase__ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ), tensor_type=lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer.model_input_names
lowerCamelCase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
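
# Typical use, sketched (the checkpoint name is illustrative and `image` is any PIL image):
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")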
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
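
# Worked example (values chosen for illustration): a 10 mH inductor at 1 kHz gives
# X_L = 2 * pi * f * L = 2 * pi * 1000 * 0.01 ≈ 62.83 ohm, so:
#
#   >>> ind_reactance(0.01, 1000, 0)
#   {'reactance': 62.83185307179586}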
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if k in (0.04, 0.06):
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
lowerCamelCase__ : Tuple = 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = grid.shape
lowerCamelCase__ : List[str] = [-1, 1, 0, 0]
lowerCamelCase__ : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowerCamelCase__ , lowerCamelCase__ : Any = [(0, source)], set()
lowerCamelCase__ : int = np.full((rows, cols) , np.inf )
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : List[str] = np.empty((rows, cols) , dtype=_lowerCamelCase )
lowerCamelCase__ : List[str] = None
while queue:
((lowerCamelCase__) , (lowerCamelCase__)) : Union[str, Any] = heappop(_lowerCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowerCamelCase__ : Optional[int] = []
while (x, y) != source:
path.append((x, y) )
lowerCamelCase__ , lowerCamelCase__ : Any = predecessors[x, y]
path.append(_lowerCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_lowerCamelCase ) ):
lowerCamelCase__ , lowerCamelCase__ : Tuple = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowerCamelCase__ : Optional[int] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_lowerCamelCase , (dist + 1, (nx, ny)) )
lowerCamelCase__ : Optional[Any] = dist + 1
lowerCamelCase__ : List[Any] = (x, y)
return np.inf, []
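
# Minimal example (illustrative): 1 marks a traversable cell, 0 a wall.
#
#   >>> grid = np.array([[1, 1], [0, 1]])
#   >>> dijkstra(grid, (0, 0), (1, 1), allow_diagonal=False)
#   (2.0, [(0, 0), (0, 1), (1, 1)])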
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[Any] = capacity_factor
lowerCamelCase__ : Optional[int] = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Tuple = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Optional[int] = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self._buckets
lowerCamelCase__ : Dict = [None] * new_size
lowerCamelCase__ : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Tuple = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
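
# Behaves like a regular dict (illustrative):
#
#   hm = HashMap(initial_block_size=4)
#   hm["key"] = 10
#   hm["key"] = 20        # overwrites in place
#   assert hm["key"] == 20 and len(hm) == 1
#   del hm["key"]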
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
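
# Sketch of the public API re-exported above (the dataset name is a placeholder):
#
#   from datasets import load_dataset
#   ds = load_dataset("imdb", split="train")
#   ds = ds.map(lambda example: {"n_chars": len(example["text"])})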
"""simple docstring"""
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = 1
while len(_lowerCamelCase ) < 1e6:
constant.append(str(_lowerCamelCase ) )
i += 1
lowerCamelCase__ : str = ''.join(_lowerCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
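
# Worked check (assuming the standard Project Euler 40 statement): the digits
# d1, d10, ..., d1000000 of 0.123456789101112... are 1, 1, 5, 3, 7, 2, 1, so
# solution() == 1 * 1 * 5 * 3 * 7 * 2 * 1 == 210.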
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return int((input_a, input_a).count(0 ) == 0 )
def lowerCamelCase_ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class a_ :
'''simple docstring'''
lowerCamelCase__ : Any = PegasusConfig
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : int = 'gelu'
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=9_9, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=4_0, lowerCamelCase_=2, lowerCamelCase_=1, lowerCamelCase_=0, ):
'''simple docstring'''
lowerCamelCase__ : Any = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Optional[int] = seq_length
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Optional[int] = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : Tuple = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : List[str] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : List[str] = eos_token_id
lowerCamelCase__ : Dict = pad_token_id
lowerCamelCase__ : Optional[Any] = bos_token_id
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
lowerCamelCase__ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
lowerCamelCase__ : Union[str, Any] = tf.concat([input_ids, eos_tensor], axis=1 )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Any = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
lowerCamelCase__ : List[str] = prepare_pegasus_inputs_dict(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return config, inputs_dict
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TFPegasusModel(config=lowerCamelCase_ ).get_decoder()
lowerCamelCase__ : Any = inputs_dict['input_ids']
lowerCamelCase__ : Tuple = input_ids[:1, :]
lowerCamelCase__ : str = inputs_dict['attention_mask'][:1, :]
lowerCamelCase__ : Optional[Any] = inputs_dict['head_mask']
lowerCamelCase__ : str = 1
# first forward pass
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, head_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__ : int = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
lowerCamelCase__ : Tuple = tf.concat([input_ids, next_tokens], axis=-1 )
lowerCamelCase__ : List[str] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
lowerCamelCase__ : List[str] = int(ids_tensor((1,), output_from_past.shape[-1] ) )
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase__ : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase_, lowerCamelCase_, rtol=1e-3 )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ):
if attention_mask is None:
lowerCamelCase__ : Any = tf.cast(tf.math.not_equal(_lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCamelCase__ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCamelCase__ : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__ : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowerCamelCase__ : int = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase__ : Optional[int] = (
{
'conversational': TFPegasusForConditionalGeneration,
'feature-extraction': TFPegasusModel,
'summarization': TFPegasusForConditionalGeneration,
'text2text-generation': TFPegasusForConditionalGeneration,
'translation': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase__ : int = True
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : Tuple = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFPegasusModelTester(self )
lowerCamelCase__ : Tuple = ConfigTester(self, config_class=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
    expected_text = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = v.to_dict()
return d
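
# Typical instantiation, sketched (`output_dir` is the only required argument; the
# other values are illustrative):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )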
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = CycleDiffusionPipeline
lowerCamelCase__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
lowerCamelCase__ : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowerCamelCase__ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
lowerCamelCase__ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : int = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=3_2, )
lowerCamelCase__ : Tuple = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', num_train_timesteps=1_0_0_0, clip_sample=lowerCamelCase_, set_alpha_to_one=lowerCamelCase_, )
torch.manual_seed(0 )
lowerCamelCase__ : Any = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, )
lowerCamelCase__ : Union[str, Any] = CLIPTextModel(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ : int = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : Any = floats_tensor((1, 3, 3_2, 3_2), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = image / 2 + 0.5
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Optional[int] = self.get_dummy_components()
lowerCamelCase__ : int = CycleDiffusionPipeline(**lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output.images
lowerCamelCase__ : str = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
lowerCamelCase__ : Optional[int] = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowerCamelCase_, 'half' ):
lowerCamelCase__ : Union[str, Any] = module.half()
lowerCamelCase__ : Tuple = CycleDiffusionPipeline(**lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = output.images
lowerCamelCase__ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
lowerCamelCase__ : Tuple = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def a__ (self ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def a__ (self ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def a__ (self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a__ (self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def a__ (self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
lowerCamelCase__ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
lowerCamelCase__ : int = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : Optional[Any] = 'CompVis/stable-diffusion-v1-4'
lowerCamelCase__ : List[str] = DDIMScheduler.from_pretrained(lowerCamelCase_, subfolder='scheduler' )
lowerCamelCase__ : Dict = CycleDiffusionPipeline.from_pretrained(
lowerCamelCase_, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, torch_dtype=torch.floataa, revision='fp16' )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : str = 'A black colored car'
lowerCamelCase__ : Tuple = 'A blue colored car'
lowerCamelCase__ : Tuple = torch.manual_seed(0 )
lowerCamelCase__ : int = pipe(
prompt=lowerCamelCase_, source_prompt=lowerCamelCase_, image=lowerCamelCase_, num_inference_steps=1_0_0, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=lowerCamelCase_, output_type='np', )
lowerCamelCase__ : Optional[int] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
lowerCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
lowerCamelCase__ : Optional[Any] = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : Tuple = 'CompVis/stable-diffusion-v1-4'
lowerCamelCase__ : Any = DDIMScheduler.from_pretrained(lowerCamelCase_, subfolder='scheduler' )
lowerCamelCase__ : int = CycleDiffusionPipeline.from_pretrained(lowerCamelCase_, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : Optional[int] = 'A black colored car'
lowerCamelCase__ : Optional[Any] = 'A blue colored car'
lowerCamelCase__ : str = torch.manual_seed(0 )
lowerCamelCase__ : List[str] = pipe(
prompt=lowerCamelCase_, source_prompt=lowerCamelCase_, image=lowerCamelCase_, num_inference_steps=1_0_0, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=lowerCamelCase_, output_type='np', )
lowerCamelCase__ : Any = output.images
assert np.abs(image - expected_image ).max() < 2e-2
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
lowerCamelCase__ : List[str] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(_lowerCamelCase ) , 'Postfix'.center(_lowerCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
lowerCamelCase__ : List[Any] = ')' # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ : Tuple = '(' # change ")" to "("
return (infix_2_postfix(''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
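
# Example (illustrative): converting "a+b*c" yields the prefix form "+a*bc"; the
# call also prints the step-by-step table described above.
#
#   infix_2_prefix("a+b*c")   # returns '+a*bc'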
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : Optional[Any] = TreeNode(int(_lowerCamelCase ) )
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
lowerCamelCase__ : Dict = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : str = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Dict = left_node
q.put(_lowerCamelCase )
lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
lowerCamelCase__ : List[str] = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : Optional[int] = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Any = right_node
q.put(_lowerCamelCase )
raise
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n:
stack.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], []
lowerCamelCase__ : int = node
stacka.append(_lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A_ : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase="facebook/mbart-large-en-ro" , _lowerCamelCase=False , _lowerCamelCase=False ):
lowerCamelCase__ : Union[str, Any] = torch.load(_lowerCamelCase , map_location='cpu' )['model']
remove_ignore_keys_(_lowerCamelCase )
lowerCamelCase__ : List[Any] = state_dict['encoder.embed_tokens.weight'].shape[0]
lowerCamelCase__ : Optional[int] = MBartConfig.from_pretrained(_lowerCamelCase , vocab_size=_lowerCamelCase )
if mbart_aa and finetuned:
lowerCamelCase__ : Optional[int] = 'relu'
lowerCamelCase__ : List[Any] = state_dict['decoder.embed_tokens.weight']
lowerCamelCase__ : Union[str, Any] = MBartForConditionalGeneration(_lowerCamelCase )
model.model.load_state_dict(_lowerCamelCase )
if finetuned:
lowerCamelCase__ : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
A_ : Optional[int] = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
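# Hypothetical invocation sketch (script name and paths are placeholders, not verified):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-dump \
#       --hf_config facebook/mbart-large-cc25 --finetuned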
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums, max_sum):
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result
def create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum):
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
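# Hedged cross-check via brute force (standalone, not part of the algorithm above):
# exhaustive enumeration over all subsets must find the same solutions for the sample input.
from itertools import combinations

_hits = [list(c) for r in range(1, len(nums) + 1) for c in combinations(nums, r) if sum(c) == max_sum]
assert sorted(sorted(s) for s in _hits) == sorted(sorted(s) for s in result)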
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : List[str] = "▁"
A_ : Tuple = {"vocab_file": "sentencepiece.bpe.model"}
A_ : Optional[int] = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
A_ : List[Any] = {
"xlm-roberta-base": 5_12,
"xlm-roberta-large": 5_12,
"xlm-roberta-large-finetuned-conll02-dutch": 5_12,
"xlm-roberta-large-finetuned-conll02-spanish": 5_12,
"xlm-roberta-large-finetuned-conll03-english": 5_12,
"xlm-roberta-large-finetuned-conll03-german": 5_12,
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Any = ['input_ids', 'attention_mask']
def __init__(self, lowerCamelCase_, lowerCamelCase_="<s>", lowerCamelCase_="</s>", lowerCamelCase_="</s>", lowerCamelCase_="<s>", lowerCamelCase_="<unk>", lowerCamelCase_="<pad>", lowerCamelCase_="<mask>", lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = AddedToken(lowerCamelCase_, lstrip=lowerCamelCase_, rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_, lowerCamelCase_ ) else mask_token
lowerCamelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase_, eos_token=lowerCamelCase_, unk_token=lowerCamelCase_, sep_token=lowerCamelCase_, cls_token=lowerCamelCase_, pad_token=lowerCamelCase_, mask_token=lowerCamelCase_, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
lowerCamelCase__ : Optional[int] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase__ : Optional[Any] = 1
lowerCamelCase__ : Tuple = len(self.sp_model ) + self.fairseq_offset
lowerCamelCase__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self ):
'''simple docstring'''
lowerCamelCase__ : int = self.__dict__.copy()
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
lowerCamelCase__ : List[str] = {}
lowerCamelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
lowerCamelCase__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_, token_ids_a=lowerCamelCase_, already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [self.sep_token_id]
lowerCamelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__ (self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase_, out_type=lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase__ : Tuple = self.sp_model.PieceToId(lowerCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ''.join(lowerCamelCase_ ).replace(lowerCamelCase_, ' ' ).strip()
return out_string
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Optional[Any] = os.path.join(
lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_, 'wb' ) as fi:
lowerCamelCase__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
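# Hedged toy sketch of the fairseq/SPM id alignment documented above (piece ids are
# assumed toy values copied from the comment table, not read from a real SPM model):
# pinned special tokens keep their fairseq ids, every other piece id shifts by the offset.
_demo_pinned = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_demo_spm_ids = {',': 3, '.': 4, '▁': 5}


def _demo_token_to_id(token, offset=1 ):
    return _demo_pinned.get(token, _demo_spm_ids[token] + offset )


assert _demo_token_to_id(',' ) == 4 and _demo_token_to_id('<pad>' ) == 1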
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : Optional[Any] = TreeNode(int(_lowerCamelCase ) )
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
lowerCamelCase__ : Dict = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : str = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Dict = left_node
q.put(_lowerCamelCase )
lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
lowerCamelCase__ : List[str] = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : Optional[int] = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Any = right_node
q.put(_lowerCamelCase )
raise
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n:
stack.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], []
lowerCamelCase__ : int = node
stacka.append(_lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A_ : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
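    # Hedged standalone demo: the same BFS idea as level_order above on a hand-built
    # three-node tree, so no input() is required; `_DemoNode` is a hypothetical stand-in
    # for the node class, not part of the original script.
    class _DemoNode:
        def __init__(self, data, left=None, right=None ):
            self.data, self.left, self.right = data, left, right

    _demo_q: queue.Queue = queue.Queue()
    _demo_q.put(_DemoNode(1, _DemoNode(2 ), _DemoNode(3 ) ) )
    while not _demo_q.empty():
        _n = _demo_q.get()
        print(_n.data, end=',' )
        if _n.left:
            _demo_q.put(_n.left )
        if _n.right:
            _demo_q.put(_n.right )
    print()  # prints: 1,2,3,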
"""simple docstring"""
from math import pi, sqrt
def gamma(num):
    if num <= 0:
        raise ValueError('math domain error' )
    if num > 171.5:
        raise OverflowError('math range error' )
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer' )
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowerCamelCase_ ( ):
    assert gamma(0.5 ) == sqrt(pi)
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
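# Hedged sanity sketch (standard library only, independent of the functions above): for
# half-integers, gamma(n + 0.5) has the closed form (2n)! / (4**n * n!) * sqrt(pi), and
# math.gamma agrees with it.
import math

for _n in range(5 ):
    _closed = math.factorial(2 * _n ) / (4**_n * math.factorial(_n ) ) * math.sqrt(math.pi )
    assert math.isclose(math.gamma(_n + 0.5 ), _closed )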
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(f"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A_ : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ : List[Any] = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
A_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A_ : str = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'''{k}</w>''']
        da[k] = d[k]  # restore
    return da
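# Minimal illustrative check (standalone assert, not part of the conversion flow) of the
# rewrite above: '@@' continuation markers are stripped, unbroken words gain '</w>', and
# the four special tokens are restored unsuffixed.
_demo_vocab = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'le@@': 5, 'tt@@': 6, 'er': 7}
assert rewrite_dict_keys(_demo_vocab ) == {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'le': 5, 'tt': 6, 'er</w>': 7}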
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : str = word.split()
def justify(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
lowerCamelCase__ : Optional[Any] = max_width - width
lowerCamelCase__ : str = len(_lowerCamelCase )
if len(_lowerCamelCase ) == 1:
            # if there is only one word in the line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCamelCase__ : Tuple = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCamelCase__ : Union[str, Any] = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCamelCase__ : Optional[Any] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(_lowerCamelCase ):
num_spaces_between_words_list[i] += 1
lowerCamelCase__ : Dict = []
for i in range(_lowerCamelCase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : list[str] = []
lowerCamelCase__ : Optional[int] = 0
for word in words:
if width + len(_lowerCamelCase ) + len(_lowerCamelCase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_lowerCamelCase )
width += len(_lowerCamelCase )
else:
# justify the line and add it to result
answer.append(justify(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) )
# reset new line and new width
lowerCamelCase__ , lowerCamelCase__ : Dict = [word], len(_lowerCamelCase )
lowerCamelCase__ : Dict = max_width - width - len(_lowerCamelCase )
answer.append(' '.join(_lowerCamelCase ) + (remaining_spaces + 1) * ' ' )
return answer
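# Minimal standalone sketch (hypothetical helper, not called by the function above) of the
# round-robin rule inside justify(): leftover spaces go one-by-one to the leftmost gaps.
def _demo_distribute(overall_spaces, gaps ):
    base, extra = divmod(overall_spaces, gaps )
    return [base + (1 if i < extra else 0) for i in range(gaps )]


assert _demo_distribute(7, 3 ) == [3, 2, 2]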
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
import re
def lowerCamelCase_ ( dna ):
    if len(re.findall('[ATCG]' , dna ) ) != len(dna ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
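# Standalone illustration (independent of the function above) of the same translate-based
# complement mapping: each base maps A<->T and C<->G.
assert 'ATCGTA'.translate(str.maketrans('ATCG' , 'TAGC' ) ) == 'TAGCAT'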
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = GPTaTokenizer
lowerCamelCase__ : Optional[int] = GPTaTokenizerFast
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : str = {'add_prefix_space': True}
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ : Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
lowerCamelCase__ : List[str] = dict(zip(lowerCamelCase_, range(len(lowerCamelCase_ ) ) ) )
lowerCamelCase__ : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
lowerCamelCase__ : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ : List[str] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase_ ) )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = 'lower newer'
lowerCamelCase__ : int = 'lower newer'
return input_text, output_text
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
lowerCamelCase__ : str = 'lower newer'
lowerCamelCase__ : List[str] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowerCamelCase__ : List[str] = tokenizer.tokenize(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = tokens + [tokenizer.unk_token]
lowerCamelCase__ : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : str = self.get_tokenizer()
lowerCamelCase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = 'lower newer'
# Testing tokenization
lowerCamelCase__ : int = tokenizer.tokenize(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : str = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing conversion to ids without special tokens
lowerCamelCase__ : Optional[int] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Dict = rust_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing conversion to ids with special tokens
lowerCamelCase__ : Tuple = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer.encode(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing the unknown token
lowerCamelCase__ : int = tokens + [rust_tokenizer.unk_token]
lowerCamelCase__ : Tuple = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
pass
def a__ (self, lowerCamelCase_=1_5 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_, **lowerCamelCase_ )
# Simple input
lowerCamelCase__ : Optional[Any] = 'This is a simple input'
lowerCamelCase__ : Optional[int] = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ : Any = ('This is a simple input', 'This is a pair')
lowerCamelCase__ : Dict = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowerCamelCase_, tokenizer_r.encode, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Simple input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Simple input
self.assertRaises(
lowerCamelCase_, tokenizer_r.batch_encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length', )
# Pair input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Pair input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Pair input
self.assertRaises(
lowerCamelCase_, tokenizer_r.batch_encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length', )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>' )
# Simple input
lowerCamelCase__ : Tuple = 'This is a simple input'
lowerCamelCase__ : str = ['This is a simple input looooooooong', 'This is a simple input']
lowerCamelCase__ : int = ('This is a simple input', 'This is a pair')
lowerCamelCase__ : Dict = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
lowerCamelCase__ : Optional[int] = tokenizer.pad_token_id
lowerCamelCase__ : Any = tokenizer(lowerCamelCase_, padding='max_length', max_length=3_0, return_tensors='np' )
lowerCamelCase__ : Union[str, Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncate=lowerCamelCase_, return_tensors='np' )
lowerCamelCase__ : str = tokenizer(*lowerCamelCase_, padding='max_length', max_length=6_0, return_tensors='np' )
lowerCamelCase__ : int = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncate=lowerCamelCase_, return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1], 3_0 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1], 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1], 6_0 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1], 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = '$$$'
lowerCamelCase__ : List[str] = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=lowerCamelCase_, add_bos_token=lowerCamelCase_ )
lowerCamelCase__ : str = 'This is a simple input'
lowerCamelCase__ : int = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ : List[str] = tokenizer.bos_token_id
lowerCamelCase__ : List[str] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
self.assertEqual(out_s.input_ids[0], lowerCamelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCamelCase__ : int = tokenizer.decode(out_s.input_ids )
lowerCamelCase__ : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], lowerCamelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [self.get_tokenizer(do_lower_case=lowerCamelCase_, add_bos_token=lowerCamelCase_ )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Optional[Any] = 'Encode this.'
lowerCamelCase__ : Optional[Any] = 'This one too please.'
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
encoded_sequence += tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer.encode_plus(
lowerCamelCase_, lowerCamelCase_, add_special_tokens=lowerCamelCase_, return_special_tokens_mask=lowerCamelCase_, )
lowerCamelCase__ : str = encoded_sequence_dict['input_ids']
lowerCamelCase__ : Any = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
lowerCamelCase__ : Any = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCamelCase_ )
]
lowerCamelCase__ : List[str] = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained('facebook/opt-350m', from_slow=lowerCamelCase_ )
lowerCamelCase__ : Tuple = 'A photo of a cat'
lowerCamelCase__ : Tuple = tokenizer.encode(
lowerCamelCase_, )
self.assertEqual(lowerCamelCase_, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('test_opt' )
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('./test_opt' )
lowerCamelCase__ : Any = tokenizer.encode(
lowerCamelCase_, )
self.assertEqual(lowerCamelCase_, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained('facebook/opt-350m', use_slow=lowerCamelCase_ )
lowerCamelCase__ : int = 'A photo of a cat'
lowerCamelCase__ : Optional[Any] = tokenizer.encode(
lowerCamelCase_, )
# Same as above
self.assertEqual(lowerCamelCase_, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained('facebook/opt-350m', from_slow=lowerCamelCase_ )
lowerCamelCase__ : Dict = 'bos'
lowerCamelCase__ : List[Any] = tokenizer.get_vocab()['bos']
lowerCamelCase__ : List[Any] = 'A photo of a cat'
lowerCamelCase__ : Dict = tokenizer.encode(
lowerCamelCase_, )
# We changed the bos token
self.assertEqual(lowerCamelCase_, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('./tok' )
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
lowerCamelCase__ : Dict = tokenizer.encode(
lowerCamelCase_, )
self.assertEqual(lowerCamelCase_, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)
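# Quick illustrative check with toy strings (not real FLAVA keys): only the right-most
# occurrence of the old suffix is replaced.
assert rreplace('a.b.c.b', '.b', '.bias', 1 ) == 'a.b.c.bias'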
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Any = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ : Union[str, Any] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase__ : Dict = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
lowerCamelCase__ : str = rreplace(_lowerCamelCase , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
lowerCamelCase__ : Optional[Any] = rreplace(_lowerCamelCase , '.b' , '.bias' , 1 )
lowerCamelCase__ : int = value.float()
return upgrade
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True ):
from dall_e import Encoder
lowerCamelCase__ : List[str] = Encoder()
if os.path.exists(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = torch.load(_lowerCamelCase )
else:
lowerCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = ckpt.state_dict()
encoder.load_state_dict(_lowerCamelCase )
if config_path is not None:
lowerCamelCase__ : Union[str, Any] = FlavaImageCodebookConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : Dict = FlavaImageCodebookConfig()
lowerCamelCase__ : Tuple = FlavaImageCodebook(_lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = encoder.state_dict()
lowerCamelCase__ : Any = upgrade_state_dict(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = hf_model.state_dict()
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = (UnCLIPScheduler,)
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = {
'num_train_timesteps': 1_0_0_0,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowerCamelCase_ )
return config
def a__ (self ):
'''simple docstring'''
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCamelCase_, prev_timestep=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase__ : Optional[Any] = self.get_scheduler_config(variance_type='fixed_small_log' )
lowerCamelCase__ : str = scheduler_class(**lowerCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_994_987 ) ) < 1e-5
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase__ : Any = self.get_scheduler_config(variance_type='learned_range' )
lowerCamelCase__ : Dict = scheduler_class(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = 0.5
assert scheduler._get_variance(1, predicted_variance=lowerCamelCase_ ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(4_8_7, predicted_variance=lowerCamelCase_ ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(9_9_9, predicted_variance=lowerCamelCase_ ) - -0.0_010_011 < 1e-5
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase__ : Tuple = self.get_scheduler_config()
lowerCamelCase__ : Optional[int] = scheduler_class(**lowerCamelCase_ )
lowerCamelCase__ : Tuple = scheduler.timesteps
lowerCamelCase__ : Dict = self.dummy_model()
lowerCamelCase__ : Tuple = self.dummy_sample_deter
lowerCamelCase__ : Any = torch.manual_seed(0 )
for i, t in enumerate(lowerCamelCase_ ):
# 1. predict noise residual
lowerCamelCase__ : Any = model(lowerCamelCase_, lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ : int = scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, generator=lowerCamelCase_ ).prev_sample
lowerCamelCase__ : str = pred_prev_sample
lowerCamelCase__ : List[Any] = torch.sum(torch.abs(lowerCamelCase_ ) )
lowerCamelCase__ : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase__ : List[str] = self.get_scheduler_config()
lowerCamelCase__ : List[str] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(2_5 )
lowerCamelCase__ : Dict = scheduler.timesteps
lowerCamelCase__ : Union[str, Any] = self.dummy_model()
lowerCamelCase__ : Any = self.dummy_sample_deter
lowerCamelCase__ : Dict = torch.manual_seed(0 )
for i, t in enumerate(lowerCamelCase_ ):
# 1. predict noise residual
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, lowerCamelCase_ )
if i + 1 == timesteps.shape[0]:
lowerCamelCase__ : Tuple = None
else:
lowerCamelCase__ : Tuple = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ : Any = scheduler.step(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, prev_timestep=lowerCamelCase_, generator=lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Dict = pred_prev_sample
lowerCamelCase__ : Tuple = torch.sum(torch.abs(lowerCamelCase_ ) )
lowerCamelCase__ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=2, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = scope
lowerCamelCase__ : List[str] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = num_patches + 2
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFDeiTModel(config=lowerCamelCase_ )
lowerCamelCase__ : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
lowerCamelCase__ : Any = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Optional[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Any = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFDeiTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
lowerCamelCase__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def prepare_img( ):
lowerCamelCase__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCamelCase__ : List[Any] = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : Tuple = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
lowerCamelCase__ : Union[str, Any] = (low + high) // 2
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = max_subarray(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = max_subarray(_lowerCamelCase , mid + 1 , _lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = max_cross_sum(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : str = float('-inf' ), -1
lowerCamelCase__ , lowerCamelCase__ : List[str] = float('-inf' ), -1
lowerCamelCase__ : int | float = 0
for i in range(_lowerCamelCase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
lowerCamelCase__ : Any = summ
lowerCamelCase__ : List[str] = i
lowerCamelCase__ : str = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
lowerCamelCase__ : Tuple = summ
lowerCamelCase__ : Union[str, Any] = i
return max_left, max_right, (left_sum + right_sum)
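# Worked example (a classic test case, not from this file): for
# arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4], calling max_subarray(arr, 0, 8)
# returns (3, 6, 6): the slice arr[3:7] == [4, -1, 2, 1] has the maximum sum, 6.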
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = [randint(1 , _lowerCamelCase ) for _ in range(_lowerCamelCase )]
lowerCamelCase__ : str = time.time()
max_subarray(_lowerCamelCase , 0 , input_size - 1 )
lowerCamelCase__ : Union[str, Any] = time.time()
return end - start
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[str] = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
lowerCamelCase__ : Tuple = [time_max_subarray(_lowerCamelCase ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(_lowerCamelCase , _lowerCamelCase ):
print(_lowerCamelCase , '\t\t' , _lowerCamelCase )
plt.plot(_lowerCamelCase , _lowerCamelCase )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 316
|
"""simple docstring"""
def add ( first , second ):
    while second != 0:
        carry = first & second  # bits where a carry is generated
        first ^= second  # partial sum without the carries
        second = carry << 1  # shift the carries into the next bit position
    return first
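# Worked example: add(5, 3)
#   carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   carry = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   carry = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   carry = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0  -> returns 8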
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = int(input("Enter the first number: ").strip())
A_ : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
| 316
| 1
|
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=0.0, lowerCamelCase_ = None, lowerCamelCase_ = "geglu", lowerCamelCase_ = None, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = True, lowerCamelCase_ = "layer_norm", lowerCamelCase_ = False, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Union[str, Any] = only_cross_attention
lowerCamelCase__ : Dict = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
lowerCamelCase__ : Tuple = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCamelCase__ : Optional[int] = AdaLayerNorm(lowerCamelCase_, lowerCamelCase_ )
elif self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[int] = AdaLayerNormZero(lowerCamelCase_, lowerCamelCase_ )
else:
lowerCamelCase__ : List[Any] = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ )
lowerCamelCase__ : List[str] = Attention(
query_dim=lowerCamelCase_, heads=lowerCamelCase_, dim_head=lowerCamelCase_, dropout=lowerCamelCase_, bias=lowerCamelCase_, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=lowerCamelCase_, )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCamelCase__ : Optional[int] = (
AdaLayerNorm(lowerCamelCase_, lowerCamelCase_ )
if self.use_ada_layer_norm
else nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ )
)
lowerCamelCase__ : List[Any] = Attention(
query_dim=lowerCamelCase_, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=lowerCamelCase_, dim_head=lowerCamelCase_, dropout=lowerCamelCase_, bias=lowerCamelCase_, upcast_attention=lowerCamelCase_, ) # is self-attn if encoder_hidden_states is none
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Dict = None
# 3. Feed-forward
lowerCamelCase__ : str = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = FeedForward(lowerCamelCase_, dropout=lowerCamelCase_, activation_fn=lowerCamelCase_, final_dropout=lowerCamelCase_ )
# let chunk size default to None
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : str = 0
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = chunk_size
lowerCamelCase__ : Optional[Any] = dim
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, ):
'''simple docstring'''
if self.use_ada_layer_norm:
lowerCamelCase__ : Optional[int] = self.norma(lowerCamelCase_, lowerCamelCase_ )
elif self.use_ada_layer_norm_zero:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = self.norma(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, hidden_dtype=hidden_states.dtype )
else:
lowerCamelCase__ : int = self.norma(lowerCamelCase_ )
lowerCamelCase__ : Any = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCamelCase__ : str = self.attna(
lowerCamelCase_, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=lowerCamelCase_, **lowerCamelCase_, )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : int = gate_msa.unsqueeze(1 ) * attn_output
lowerCamelCase__ : Union[str, Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCamelCase__ : Tuple = (
self.norma(lowerCamelCase_, lowerCamelCase_ ) if self.use_ada_layer_norm else self.norma(lowerCamelCase_ )
)
lowerCamelCase__ : Dict = self.attna(
lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, attention_mask=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : str = attn_output + hidden_states
# 3. Feed-forward
lowerCamelCase__ : Optional[int] = self.norma(lowerCamelCase_ )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[Any] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
lowerCamelCase__ : int = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCamelCase__ : Optional[Any] = torch.cat(
[self.ff(lowerCamelCase_ ) for hid_slice in norm_hidden_states.chunk(lowerCamelCase_, dim=self._chunk_dim )], dim=self._chunk_dim, )
else:
lowerCamelCase__ : List[str] = self.ff(lowerCamelCase_ )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[Any] = gate_mlp.unsqueeze(1 ) * ff_output
lowerCamelCase__ : int = ff_output + hidden_states
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = 4, lowerCamelCase_ = 0.0, lowerCamelCase_ = "geglu", lowerCamelCase_ = False, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Tuple = int(dim * mult )
lowerCamelCase__ : List[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCamelCase__ : Optional[Any] = GELU(lowerCamelCase_, lowerCamelCase_ )
if activation_fn == "gelu-approximate":
lowerCamelCase__ : Dict = GELU(lowerCamelCase_, lowerCamelCase_, approximate='tanh' )
elif activation_fn == "geglu":
lowerCamelCase__ : str = GEGLU(lowerCamelCase_, lowerCamelCase_ )
elif activation_fn == "geglu-approximate":
lowerCamelCase__ : Dict = ApproximateGELU(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = nn.ModuleList([] )
# project in
self.net.append(lowerCamelCase_ )
# project dropout
self.net.append(nn.Dropout(lowerCamelCase_ ) )
# project out
self.net.append(nn.Linear(lowerCamelCase_, lowerCamelCase_ ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(lowerCamelCase_ ) )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
for module in self.net:
lowerCamelCase__ : Optional[Any] = module(lowerCamelCase_ )
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = "none" ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Optional[int] = nn.Linear(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = approximate
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(lowerCamelCase_, approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ), approximate=self.approximate ).to(dtype=gate.dtype )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.proj(lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.gelu(lowerCamelCase_ )
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Union[str, Any] = nn.Linear(lowerCamelCase_, dim_out * 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(lowerCamelCase_ )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.proj(lowerCamelCase_ ).chunk(2, dim=-1 )
return hidden_states * self.gelu(lowerCamelCase_ )
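# GEGLU (Shazeer, "GLU Variants Improve Transformer") projects to 2 * dim_out and
# splits the result in half: one half is the value, the other is passed through
# GELU and used as a multiplicative gate, i.e. GEGLU(x) = (x W) * GELU(x V).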
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Any = nn.Linear(lowerCamelCase_, lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.proj(lowerCamelCase_ )
return x * torch.sigmoid(1.702 * x )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Optional[int] = nn.Embedding(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = nn.SiLU()
lowerCamelCase__ : int = nn.Linear(lowerCamelCase_, embedding_dim * 2 )
lowerCamelCase__ : Tuple = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.linear(self.silu(self.emb(lowerCamelCase_ ) ) )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = torch.chunk(lowerCamelCase_, 2 )
lowerCamelCase__ : Optional[int] = self.norm(lowerCamelCase_ ) * (1 + scale) + shift
return x
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Tuple = CombinedTimestepLabelEmbeddings(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = nn.SiLU()
lowerCamelCase__ : List[Any] = nn.Linear(lowerCamelCase_, 6 * embedding_dim, bias=lowerCamelCase_ )
lowerCamelCase__ : int = nn.LayerNorm(lowerCamelCase_, elementwise_affine=lowerCamelCase_, eps=1e-6 )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.linear(self.silu(self.emb(lowerCamelCase_, lowerCamelCase_, hidden_dtype=lowerCamelCase_ ) ) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = emb.chunk(6, dim=1 )
lowerCamelCase__ : List[str] = self.norm(lowerCamelCase_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class a_ ( nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = 1e-5 ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Optional[Any] = num_groups
lowerCamelCase__ : List[Any] = eps
if act_fn is None:
lowerCamelCase__ : int = None
else:
lowerCamelCase__ : Optional[int] = get_activation(lowerCamelCase_ )
lowerCamelCase__ : List[str] = nn.Linear(lowerCamelCase_, out_dim * 2 )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self.act:
lowerCamelCase__ : List[Any] = self.act(lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.linear(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = emb[:, :, None, None]
lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.chunk(2, dim=1 )
lowerCamelCase__ : int = F.group_norm(lowerCamelCase_, self.num_groups, eps=self.eps )
lowerCamelCase__ : Optional[int] = x * (1 + scale) + shift
return x
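# The `x * (1 + scale) + shift` pattern above is a FiLM-style affine modulation of
# the group-normalized activations; with `scale` and `shift` near zero the layer
# reduces to plain group norm, which keeps conditioning well-behaved early in training.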
| 316
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class a_ ( unittest.TestCase , snake_case_ ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = load_tool('text-classification' )
self.tool.setup()
lowerCamelCase__ : Dict = load_tool('text-classification', remote=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.tool('That\'s quite cool', ['positive', 'negative'] )
self.assertEqual(lowerCamelCase_, 'positive' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.remote_tool('That\'s quite cool', ['positive', 'negative'] )
self.assertEqual(lowerCamelCase_, 'positive' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.tool(text='That\'s quite cool', labels=['positive', 'negative'] )
self.assertEqual(lowerCamelCase_, 'positive' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'] )
self.assertEqual(lowerCamelCase_, 'positive' )
| 316
|
"""simple docstring"""
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
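# The expression above is the algebraic identity tanh(x) = 2 / (1 + e^(-2x)) - 1,
# so an input of 0 maps to 0 and large positive inputs approach 1.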
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
        super().__init__(None, None )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[Any] = capacity_factor
lowerCamelCase__ : Optional[int] = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
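    # The two helpers above implement open addressing with linear probing: a key
    # hashes to a starting bucket, and on a collision the probe steps to the next
    # bucket (wrapping around) until a free slot or the matching key is found.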
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Tuple = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Optional[int] = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self._buckets
lowerCamelCase__ : Dict = [None] * new_size
lowerCamelCase__ : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Tuple = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
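# Minimal usage sketch (assuming the class above is bound to the name HashMap):
#   hm = HashMap()
#   hm['a'] = 1
#   hm['b'] = 2
#   len(hm)      # -> 2
#   del hm['a']
#   'a' in hm    # -> False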
| 316
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 316
| 1
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
    if not isinstance(_lowerCamelCase , int ):
        raise TypeError('Input value must be a \'int\' type' )
    if _lowerCamelCase < 0:
        raise ValueError('Input value must be a positive integer' )
    return bin(_lowerCamelCase ).count('1' )
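# Example: bin(25) == '0b11001', which contains three '1' characters, so an
# input of 25 returns 3.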
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ : int = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
A_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 1
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
A_ : Optional[int] = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(_lowerCamelCase ) , version.parse(_lowerCamelCase ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = None ):
lowerCamelCase__ : Optional[Any] = f'''\n{hint}''' if hint is not None else ''
# non-versioned check
if re.match(r'^[\w_\-\d]+$' , _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = requirement, None, None
else:
lowerCamelCase__ : Optional[int] = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , _lowerCamelCase )
if not match:
raise ValueError(
                'requirement needs to be in the pip package format, e.g. package_a==1.23 or package_b>=1.23, but'
f''' got {requirement}''' )
lowerCamelCase__ , lowerCamelCase__ : Tuple = match[0]
lowerCamelCase__ : Tuple = want_full.split(',' ) # there could be multiple requirements
lowerCamelCase__ : Optional[int] = {}
for w in want_range:
lowerCamelCase__ : List[Any] = re.findall(r'^([\s!=<>]{1,2})(.+)' , _lowerCamelCase )
if not match:
raise ValueError(
                'requirement needs to be in the pip package format, e.g. package_a==1.23 or package_b>=1.23,'
f''' but got {requirement}''' )
lowerCamelCase__ , lowerCamelCase__ : Any = match[0]
lowerCamelCase__ : Optional[Any] = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
        lowerCamelCase__ : Union[str, Any] = '.'.join([str(x ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return
# check if any version is installed
try:
lowerCamelCase__ : Dict = importlib.metadata.version(_lowerCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(_lowerCamelCase , _lowerCamelCase )
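# Usage sketch (hypothetical calls; the two functions above correspond to
# transformers' require_version and require_version_core):
#   require_version('tokenizers>=0.11.1,!=0.11.3,<0.13') parses all three
#   constraints and checks each against the installed version, while
#   require_version('numpy') only checks that the package is installed at all.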
| 316
|
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
A_ : List[Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class a_ ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ : Optional[datasets.Features] = None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , ):
import pyspark
def generate_fn():
lowerCamelCase__ : Optional[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
lowerCamelCase__ : Dict = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
lowerCamelCase__ : Dict = partition_df.collect()
lowerCamelCase__ : int = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
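# Keying each example as '<partition_id>_<row_id>' ties example ids to Spark
# partitions, so shuffling only needs to permute the partition order rather than
# materializing and reshuffling the rows themselves.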
class a_ ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = df
lowerCamelCase__ : Any = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCamelCase__ : List[Any] = _generate_iterable_examples(self.df, self.partition_order )
def __iter__(self ):
'''simple docstring'''
yield from self.generate_examples_fn()
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.split_shard_indices_by_worker(lowerCamelCase_, lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
return len(self.partition_order )
class a_ ( datasets.DatasetBuilder ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = SparkConfig
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase__ : Optional[Any] = df
lowerCamelCase__ : Dict = working_dir
super().__init__(
cache_dir=lowerCamelCase_, config_name=str(self.df.semanticHash() ), **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
def create_cache_and_write_probe(lowerCamelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir, exist_ok=lowerCamelCase_ )
            lowerCamelCase__ : str = os.path.join(self._cache_dir, 'fs_test' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowerCamelCase_, 'a' )
return [probe_file]
if self._spark.conf.get('spark.master', '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase__ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ), 1 ).mapPartitions(lowerCamelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def a__ (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(lowerCamelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
lowerCamelCase__ : List[Any] = self.df.count()
lowerCamelCase__ : List[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase__ : List[Any] = (
self.df.limit(lowerCamelCase_ )
.repartition(1 )
.mapInArrow(lowerCamelCase_, 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase__ : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase__ : str = min(lowerCamelCase_, int(approx_total_size / max_shard_size ) )
lowerCamelCase__ : List[Any] = self.df.repartition(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : List[str] = ParquetWriter if file_format == 'parquet' else ArrowWriter
lowerCamelCase__ : List[str] = os.path.join(self._working_dir, os.path.basename(lowerCamelCase_ ) ) if self._working_dir else fpath
lowerCamelCase__ : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase__ : int = self.config.features
lowerCamelCase__ : Dict = self._writer_batch_size
lowerCamelCase__ : Optional[Any] = self._fs.storage_options
def write_arrow(lowerCamelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase__ : Any = pyspark.TaskContext().taskAttemptId()
            lowerCamelCase__ : str = next(lowerCamelCase_, None )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], )
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Any = writer_class(
features=lowerCamelCase_, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : List[str] = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCamelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
shard_id += 1
lowerCamelCase__ : Dict = writer_class(
features=writer._features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : Tuple = pa.Table.from_batches([batch] )
writer.write_table(lowerCamelCase_ )
if writer._num_bytes > 0:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ):
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(lowerCamelCase_ ), os.path.basename(lowerCamelCase_ ) )
shutil.move(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = (
self.df.mapInArrow(lowerCamelCase_, 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
self._validate_cache_dir()
lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
lowerCamelCase__ : str = not is_remote_filesystem(self._fs )
lowerCamelCase__ : Any = os.path.join if is_local else posixpath.join
lowerCamelCase__ : Any = '-TTTTT-SSSSS-of-NNNNN'
lowerCamelCase__ : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowerCamelCase__ : Union[str, Any] = path_join(self._output_dir, lowerCamelCase_ )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[str] = []
for task_id, content in self._prepare_split_single(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : int = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase_ )
lowerCamelCase__ : str = total_num_examples
lowerCamelCase__ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowerCamelCase__ : Union[str, Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase__ : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
rename(
lowerCamelCase_, fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''' ).replace('NNNNN', f'''{total_shards:05d}''' ), )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = 0
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ , lowerCamelCase__ : Any = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase_, len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Dict = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace(lowerCamelCase_, '' ), )
def a__ (self, lowerCamelCase_, ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 316
| 1
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = len(lowerCamelCase_ )
lowerCamelCase__ : Any = [0] * len_array
if len_array > 0:
lowerCamelCase__ : Union[str, Any] = array[0]
for i in range(1, lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = self.prefix_sum[i - 1] + array[i]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowerCamelCase_ )
return False
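    # Worked example: for array = [1, 2, 3] the prefix sums are [1, 3, 6]; the
    # range-sum query for (1, 2) returns 6 - 1 = 5, and the subarray-sum check
    # for 5 is True because the contiguous slice [2, 3] sums to 5.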
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
A_ : List[str] = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
A_, A_ : List[Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
A_ : Optional[Any] = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
A_ : str = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
A_ : List[str] = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 316
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['image_processor', 'tokenizer']
lowerCamelCase__ : Optional[int] = 'CLIPImageProcessor'
lowerCamelCase__ : List[str] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : int = kwargs.pop('feature_extractor' )
lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase__ : Any = self.tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if images is not None:
lowerCamelCase__ : List[Any] = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if text is not None and images is not None:
lowerCamelCase__ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ), tensor_type=lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer.model_input_names
lowerCamelCase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 316
| 1
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def lowerCamelCase_ ( _lowerCamelCase=None , _lowerCamelCase=None ):
return field(default_factory=lambda: default , metadata=_lowerCamelCase )
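# Dataclasses reject mutable defaults such as lists, so list-valued fields are
# declared via field(default_factory=...); the lambda above freezes the given
# default list into a factory.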
@dataclass
class a_ :
'''simple docstring'''
lowerCamelCase__ : str = field(
metadata={'help': 'The csv file to plot.'} , )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
lowerCamelCase__ : Optional[str] = field(
default=snake_case_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
lowerCamelCase__ : Optional[List[str]] = list_field(
default=snake_case_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def lowerCamelCase_ ( _lowerCamelCase ):
try:
int(_lowerCamelCase )
return True
except ValueError:
return False
def lowerCamelCase_ ( _lowerCamelCase ):
try:
float(_lowerCamelCase )
return True
except ValueError:
return False
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = args
lowerCamelCase__ : Optional[int] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file, newline='' ) as csv_file:
lowerCamelCase__ : Any = csv.DictReader(lowerCamelCase_ )
for row in reader:
lowerCamelCase__ : Any = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
lowerCamelCase__ : Optional[Any] = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
lowerCamelCase__ : Tuple = float(row['result'] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = plt.subplots()
lowerCamelCase__ : Dict = 'Time usage' if self.args.is_time else 'Memory usage'
lowerCamelCase__ : Optional[Any] = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
lowerCamelCase__ : Dict = sorted(set(self.result_dict[model_name]['bsz'] ) )
lowerCamelCase__ : Any = sorted(set(self.result_dict[model_name]['seq_len'] ) )
lowerCamelCase__ : int = self.result_dict[model_name]['result']
((lowerCamelCase__) , (lowerCamelCase__)) : Optional[int] = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
lowerCamelCase__ : int = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
lowerCamelCase__ : Tuple = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=lowerCamelCase_, )
else:
lowerCamelCase__ : Tuple = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32, )
((lowerCamelCase__) , (lowerCamelCase__)) : Union[str, Any] = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
lowerCamelCase__ : int = np.asarray(lowerCamelCase_, lowerCamelCase_ )[: len(lowerCamelCase_ )]
plt.scatter(
lowerCamelCase_, lowerCamelCase_, label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(lowerCamelCase_, lowerCamelCase_, '--' )
title_str += f''' {label_model_name} vs.'''
lowerCamelCase__ : Any = title_str[:-4]
lowerCamelCase__ : List[str] = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(lowerCamelCase_ )
plt.xlabel(lowerCamelCase_ )
plt.ylabel(lowerCamelCase_ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def lowerCamelCase_ ( ):
lowerCamelCase__ : Tuple = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
lowerCamelCase__ : Dict = Plot(args=_lowerCamelCase )
plot.plot()
if __name__ == "__main__":
main()
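# Example input (for reference, not part of the original script): the reader above
# expects a CSV with `model`, `batch_size`, `sequence_length` and `result` columns,
# as produced by the benchmark scripts, e.g.:
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,0.018
#   bert-base-uncased,8,512,0.079
#
# A typical invocation would then be:
#   python plot_csv_file.py --csv_file results.csv --is_time --figure_png_file plot.png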
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k: Harris detector free parameter, empirically in [0.04, 0.06]
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)  # use the configured k (a hard-coded 0.04 previously shadowed self.k)
                # threshold; the value can be tuned
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
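# Worked example of the response formula above, assuming hypothetical windowed
# gradient sums wxx = 4.0, wyy = 4.0, wxy = 1.0:
#   det   = 4.0 * 4.0 - 1.0**2    = 15.0
#   trace = 4.0 + 4.0             = 8.0
#   r     = 15.0 - 0.04 * 8.0**2  = 12.44  (> 0.5, so the pixel is flagged as a corner)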
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            'do_resize': self.do_resize,
            'size': self.size,
            'do_center_crop': self.do_center_crop,
            'crop_size': self.crop_size,
            'do_normalize': self.do_normalize,
            'image_mean': self.image_mean,
            'image_std': self.image_std,
            'do_reduce_labels': self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
    image = Image.open(dataset[0]['file'])
    map = Image.open(dataset[1]['file'])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
    image1 = Image.open(ds[0]['file'])
    map1 = Image.open(ds[1]['file'])
    image2 = Image.open(ds[2]['file'])
    map2 = Image.open(ds[3]['file'])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 20, 'width': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_, Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[int] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
lowerCamelCase__ : List[Any] = image_processing(lowerCamelCase_, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
    def test_call_numpy(self):
lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase_, numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_, np.ndarray )
# Test not batched input
lowerCamelCase__ : Any = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
lowerCamelCase__ : Dict = image_processing(lowerCamelCase_, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
    def test_call_pytorch(self):
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase_, torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_, torch.Tensor )
# Test not batched input
lowerCamelCase__ : str = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
lowerCamelCase__ : str = image_processing(lowerCamelCase_, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
    def test_call_segmentation_maps(self):
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : str = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase_, torchify=lowerCamelCase_ )
lowerCamelCase__ : Tuple = []
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_, torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowerCamelCase__ : Tuple = image_processing(image_inputs[0], maps[0], return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(
encoding['labels'].shape, (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(encoding['labels'].dtype, torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test batched
lowerCamelCase__ : Union[str, Any] = image_processing(lowerCamelCase_, lowerCamelCase_, return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(
encoding['labels'].shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(encoding['labels'].dtype, torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
lowerCamelCase__ , lowerCamelCase__ : int = prepare_semantic_single_inputs()
lowerCamelCase__ : Optional[Any] = image_processing(lowerCamelCase_, lowerCamelCase_, return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(
encoding['labels'].shape, (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(encoding['labels'].dtype, torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test batched input (PIL images)
lowerCamelCase__ , lowerCamelCase__ : int = prepare_semantic_batch_inputs()
lowerCamelCase__ : Optional[int] = image_processing(lowerCamelCase_, lowerCamelCase_, return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape, (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(
encoding['labels'].shape, (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(encoding['labels'].dtype, torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
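# `_deleted` is a tombstone: open addressing cannot simply reset a deleted slot
# back to None, because that would break the probe chain for keys stored further
# along. Lookups skip tombstones and keep probing, while `__bool__` returning
# False lets `_try_set` treat a tombstone slot as writable again.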
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        # linear probing: simply step to the next bucket, wrapping around
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        # Returns True if the slot was empty (or a tombstone) or already held
        # this key; returns False if the slot is occupied by a different key.
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ', '.join(f'{item.key}: {item.val}' for item in self._buckets if item)
        return f'HashMap({val_string})'
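# A small usage sketch (not part of the original module): the map behaves like a
# dict, growing via _size_up() once the load factor is exceeded and shrinking
# again as deletions make it sparse.
if __name__ == '__main__':
    hm = HashMap(initial_block_size=4)
    for i in range(10):
        hm[f'key{i}'] = i  # triggers several resizes along the way
    del hm['key3']
    assert len(hm) == 9
    assert hm['key7'] == 7
    print(hm)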
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if binary_search(sequence, target) else 'not '
print(f"{target} was {not_str}found in {sequence}")
"""simple docstring"""
def solution() -> int:
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = ''.join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
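# Sanity check (illustrative): the concatenation starts '123456789101112...', so
# constant[0] == '1' and constant[9] == '1' (the first digit of 10). The product
# above is therefore d_1 * d_10 * d_100 * d_1000 * d_10000 * d_100000 * d_1000000
# of Champernowne's constant, as asked by Project Euler problem 40.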
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"{ugly_numbers(2_00) = }")
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            'do_resize': self.do_resize,
            'size': self.size,
            'do_normalize': self.do_normalize,
            'image_mean': self.image_mean,
            'image_std': self.image_std,
            'do_rescale': self.do_rescale,
            'rescale_factor': self.rescale_factor,
            'do_pad': self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_, Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[int] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : Dict = self.image_processor_tester.get_expected_values(lowerCamelCase_, batched=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = image_processing(lowerCamelCase_, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
    def test_call_numpy(self):
lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase_, numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_, np.ndarray )
# Test not batched input
lowerCamelCase__ : Optional[int] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Dict = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCamelCase__ : Union[str, Any] = image_processing(lowerCamelCase_, return_tensors='pt' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : int = self.image_processor_tester.get_expected_values(lowerCamelCase_, batched=lowerCamelCase_ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
    def test_call_pytorch(self):
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase_, torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_, torch.Tensor )
# Test not batched input
lowerCamelCase__ : str = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCamelCase__ : Optional[int] = image_processing(lowerCamelCase_, return_tensors='pt' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Any = self.image_processor_tester.get_expected_values(lowerCamelCase_, batched=lowerCamelCase_ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
lowerCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f:
lowerCamelCase__ : Tuple = json.loads(f.read() )
lowerCamelCase__ : Dict = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
lowerCamelCase__ : Any = DeformableDetrImageProcessor()
lowerCamelCase__ : List[Any] = image_processing(images=lowerCamelCase_, annotations=lowerCamelCase_, return_tensors='pt' )
# verify pixel values
lowerCamelCase__ : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape, lowerCamelCase_ )
lowerCamelCase__ : str = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], lowerCamelCase_, atol=1e-4 ) )
# verify area
lowerCamelCase__ : Tuple = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], lowerCamelCase_ ) )
# verify boxes
lowerCamelCase__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], lowerCamelCase_, atol=1e-3 ) )
# verify image_id
lowerCamelCase__ : Dict = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], lowerCamelCase_ ) )
# verify is_crowd
lowerCamelCase__ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], lowerCamelCase_ ) )
# verify class_labels
lowerCamelCase__ : Any = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], lowerCamelCase_ ) )
# verify orig_size
lowerCamelCase__ : Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], lowerCamelCase_ ) )
# verify size
lowerCamelCase__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], lowerCamelCase_ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
lowerCamelCase__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f:
lowerCamelCase__ : Any = json.loads(f.read() )
lowerCamelCase__ : List[str] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
lowerCamelCase__ : Any = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCamelCase__ : Union[str, Any] = DeformableDetrImageProcessor(format='coco_panoptic' )
lowerCamelCase__ : str = image_processing(images=lowerCamelCase_, annotations=lowerCamelCase_, masks_path=lowerCamelCase_, return_tensors='pt' )
# verify pixel values
lowerCamelCase__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], lowerCamelCase_, atol=1e-4 ) )
# verify area
lowerCamelCase__ : Dict = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], lowerCamelCase_ ) )
# verify boxes
lowerCamelCase__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], lowerCamelCase_, atol=1e-3 ) )
# verify image_id
lowerCamelCase__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], lowerCamelCase_ ) )
# verify is_crowd
lowerCamelCase__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], lowerCamelCase_ ) )
# verify class_labels
lowerCamelCase__ : str = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], lowerCamelCase_ ) )
# verify masks
lowerCamelCase__ : int = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item(), lowerCamelCase_ )
# verify orig_size
lowerCamelCase__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], lowerCamelCase_ ) )
# verify size
lowerCamelCase__ : str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], lowerCamelCase_ ) )
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
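# Why the override above exists (illustrative sketch, not part of the original
# file): a raw `GenerationConfig` is not JSON-serializable, so without flattening
# it to a plain dict, logging or saving the arguments would fail, e.g.:
#
#   args = Seq2SeqTrainingArguments(output_dir='out', generation_config=GenerationConfig(max_length=64))
#   json.dumps(args.to_dict())  # works because to_dict() converted the GenerationConfig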
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ['vqvae']

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = 0, lowerCamelCase_ = 0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = 0, lowerCamelCase_ = 0, lowerCamelCase_ = None, lowerCamelCase_ = 0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_=True, ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCamelCase_ )
lowerCamelCase__ : List[str] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCamelCase__ : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCamelCase__ : Optional[int] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
), generator=lowerCamelCase_, device=self.device, )
lowerCamelCase__ : Optional[int] = noise
lowerCamelCase__ : List[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = self.mel.audio_slice_to_image(lowerCamelCase_ )
lowerCamelCase__ : Any = np.frombuffer(input_image.tobytes(), dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
lowerCamelCase__ : int = (input_image / 2_5_5) * 2 - 1
lowerCamelCase__ : int = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCamelCase__ : Any = self.vqvae.encode(torch.unsqueeze(lowerCamelCase_, 0 ) ).latent_dist.sample(
generator=lowerCamelCase_ )[0]
lowerCamelCase__ : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCamelCase__ : Dict = self.scheduler.add_noise(lowerCamelCase_, lowerCamelCase_, self.scheduler.timesteps[start_step - 1] )
lowerCamelCase__ : Union[str, Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCamelCase__ : Dict = int(mask_start_secs * pixels_per_second )
lowerCamelCase__ : Any = int(mask_end_secs * pixels_per_second )
lowerCamelCase__ : str = self.scheduler.add_noise(lowerCamelCase_, lowerCamelCase_, torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet, UNet2DConditionModel):
lowerCamelCase__ : str = self.unet(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )['sample']
else:
lowerCamelCase__ : int = self.unet(lowerCamelCase_, lowerCamelCase_ )['sample']
            if isinstance(self.scheduler, DDIMScheduler):
lowerCamelCase__ : Any = self.scheduler.step(
model_output=lowerCamelCase_, timestep=lowerCamelCase_, sample=lowerCamelCase_, eta=lowerCamelCase_, generator=lowerCamelCase_, )['prev_sample']
else:
lowerCamelCase__ : Union[str, Any] = self.scheduler.step(
model_output=lowerCamelCase_, timestep=lowerCamelCase_, sample=lowerCamelCase_, generator=lowerCamelCase_, )['prev_sample']
if mask is not None:
if mask_start > 0:
lowerCamelCase__ : int = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCamelCase__ : Tuple = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCamelCase__ : Dict = 1 / self.vqvae.config.scaling_factor * images
lowerCamelCase__ : List[Any] = self.vqvae.decode(lowerCamelCase_ )['sample']
lowerCamelCase__ : List[str] = (images / 2 + 0.5).clamp(0, 1 )
lowerCamelCase__ : Optional[int] = images.cpu().permute(0, 2, 3, 1 ).numpy()
lowerCamelCase__ : Optional[Any] = (images * 2_5_5).round().astype('uint8' )
lowerCamelCase__ : Optional[int] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowerCamelCase_, mode='RGB' ).convert('L' ) for _ in images) )
lowerCamelCase__ : Tuple = [self.mel.image_to_audio(lowerCamelCase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCamelCase_ )[:, np.newaxis, :] ), **ImagePipelineOutput(lowerCamelCase_ ) )
@torch.no_grad()
    def encode(self, images, steps=50):
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
lowerCamelCase__ : Any = np.array(
[np.frombuffer(image.tobytes(), dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
lowerCamelCase__ : Union[str, Any] = (sample / 2_5_5) * 2 - 1
lowerCamelCase__ : str = torch.Tensor(lowerCamelCase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,) ) ):
lowerCamelCase__ : List[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCamelCase__ : Optional[Any] = self.scheduler.alphas_cumprod[t]
lowerCamelCase__ : Union[str, Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCamelCase__ : Optional[Any] = 1 - alpha_prod_t
lowerCamelCase__ : Tuple = self.unet(lowerCamelCase_, lowerCamelCase_ )['sample']
lowerCamelCase__ : Optional[Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCamelCase__ : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCamelCase__ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0, x1, alpha):
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
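# Usage sketch for `slerp` above (illustrative, not part of the original pipeline):
# spherical linear interpolation between two noise tensors, with slerp(x0, x1, 0.0)
# returning x0 and slerp(x0, x1, 1.0) returning x1.
#
#   noise_a = torch.randn(1, 1, 256, 256)
#   noise_b = torch.randn(1, 1, 256, 256)
#   halfway = AudioDiffusionPipeline.slerp(noise_a, noise_b, 0.5)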
"""simple docstring"""
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        'Symbol'.center(8),
        'Stack'.center(print_width),
        'Postfix'.center(print_width),
        sep=' | ',
    )
    print('-' * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == '(':
            stack.append(x)  # if x is "(" push to Stack
        elif x == ')':  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != '(':
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # the "(" guard keeps parentheses (which have no priority) from being compared
                while len(stack) > 0 and stack[-1] != '(' and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            (''.join(stack)).ljust(print_width),
            (''.join(post_fix)).ljust(print_width),
            sep=' | ',
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8),
            (''.join(stack)).ljust(print_width),
            (''.join(post_fix)).ljust(print_width),
            sep=' | ',
        )  # Output in tabular format

    return ''.join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == '(':
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ')':
            infix[i] = '('  # change ")" to "("

    return (infix_2_postfix(''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
"""simple docstring"""
def _print_dist(dist, v):
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()


def floyd_warshall(graph, v):
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))

    graph = [[float('inf') for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
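# A non-interactive usage sketch (illustrative), mirroring the example above with
# v = 3 and the two edges 1 -> 2 (weight 2) and 2 -> 1 (weight 1):
#
#   INF = float('inf')
#   graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
#   dist, _ = floyd_warshall(graph, 3)
#   # prints the matrix shown above: vertex 1 reaches vertex 2 at cost 2,
#   # vertex 2 reaches vertex 1 at cost 1, and vertex 0 stays isolated (INF).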
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-config')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-config-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-config')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('test-config', use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != 'transformers_version':
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != 'transformers_version':
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)

        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != 'transformers_version':
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != 'transformers_version':
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})

        new_config = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_lowerCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCamelCase__ : List[Any] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCamelCase__ : Tuple = [[0.0, 0.0], [0.0, 0.0]]
lowerCamelCase__ , lowerCamelCase__ : int = matrix[1][1], matrix[0][0]
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_lowerCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_lowerCamelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
        # Calculate the determinant of the matrix using Sarrus' rule
lowerCamelCase__ : List[Any] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
lowerCamelCase__ : Tuple = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCamelCase__ : List[str] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCamelCase__ : str = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCamelCase__ : Tuple = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCamelCase__ : Union[str, Any] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCamelCase__ : Union[str, Any] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCamelCase__ : List[Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCamelCase__ : Union[str, Any] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCamelCase__ : Dict = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCamelCase__ : List[str] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCamelCase__ : Any = array(_lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
lowerCamelCase__ : Optional[int] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCamelCase__ : List[str] = array(_lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_lowerCamelCase )
# Calculate the inverse of the matrix
return [[float(d(_lowerCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
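# Worked example (hypothetical, for illustration only): for [[2.0, 5.0], [3.0, 7.0]]
# the determinant is 2*7 - 3*5 = -1, so the routine above should return
# [[-7.0, 5.0], [3.0, -2.0]]: the swapped/negated entries, each divided by -1.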
| 316
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
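# With the inputs above, the backtracking search should report the two subsets
# that sum to 9, found in depth-first order: [3, 4, 2] and [4, 5].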
| 316
| 1
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_0_0, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=4, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=[0, 1, 2, 3], ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Dict = 1_0_0
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : str = patch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : Dict = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : str = scope
lowerCamelCase__ : Optional[int] = out_indices
lowerCamelCase__ : Union[str, Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : str = num_patches + 1
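        # e.g. with the defaults above (image_size=30, patch_size=2): (30 // 2) ** 2 = 225 patches, so seq_length = 226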
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
lowerCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def a__ (self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = BeitModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = BeitForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.type_sequence_label_size
lowerCamelCase__ : List[str] = BeitForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Any = BeitForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : int = BeitForSemanticSegmentation(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase__ : Dict = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Tuple = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BeitModelTester(self )
lowerCamelCase__ : Optional[int] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, nn.Linear ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] = model_class(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
lowerCamelCase__ : str = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase_ ), BeitForMaskedImageModeling]:
continue
lowerCamelCase__ : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
lowerCamelCase__ : List[str] = self._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
lowerCamelCase__ : str = model(**lowerCamelCase_ ).loss
loss.backward()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase_ )
model.train()
lowerCamelCase__ : str = self._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
lowerCamelCase__ : Dict = model(**lowerCamelCase_ ).loss
loss.backward()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : int = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
lowerCamelCase__ : Tuple = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Union[str, Any] = BeitModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(lowerCamelCase_ )
lowerCamelCase__ : Any = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : int = image_processor(images=lowerCamelCase_, return_tensors='pt' ).pixel_values.to(lowerCamelCase_ )
# prepare bool_masked_pos
lowerCamelCase__ : Any = torch.ones((1, 1_9_6), dtype=torch.bool ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[str] = model(pixel_values=lowerCamelCase_, bool_masked_pos=lowerCamelCase_ )
lowerCamelCase__ : List[str] = outputs.logits
# verify the logits
lowerCamelCase__ : str = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], lowerCamelCase_, atol=1e-2 ) )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(lowerCamelCase_ )
lowerCamelCase__ : str = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : Any = image_processor(images=lowerCamelCase_, return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : str = model(**lowerCamelCase_ )
lowerCamelCase__ : Any = outputs.logits
# verify the logits
lowerCamelCase__ : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
lowerCamelCase__ : str = 2_8_1
self.assertEqual(logits.argmax(-1 ).item(), lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
lowerCamelCase_ )
lowerCamelCase__ : str = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : List[str] = image_processor(images=lowerCamelCase_, return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Any = model(**lowerCamelCase_ )
lowerCamelCase__ : int = outputs.logits
# verify the logits
lowerCamelCase__ : Union[str, Any] = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape, lowerCamelCase_ )
lowerCamelCase__ : int = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
lowerCamelCase__ : Tuple = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item(), lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
lowerCamelCase__ : Optional[int] = model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = BeitImageProcessor(do_resize=lowerCamelCase_, size=6_4_0, do_center_crop=lowerCamelCase_ )
lowerCamelCase__ : List[str] = load_dataset('hf-internal-testing/fixtures_ade20k', split='test' )
lowerCamelCase__ : Optional[Any] = Image.open(ds[0]['file'] )
lowerCamelCase__ : Optional[Any] = image_processor(images=lowerCamelCase_, return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Dict = model(**lowerCamelCase_ )
lowerCamelCase__ : Dict = outputs.logits
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Dict = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
lowerCamelCase__ : List[str] = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=lowerCamelCase_, )
else:
lowerCamelCase__ : Tuple = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=lowerCamelCase_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], lowerCamelCase_, atol=1e-4 ) )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
lowerCamelCase__ : List[Any] = model.to(lowerCamelCase_ )
lowerCamelCase__ : Any = BeitImageProcessor(do_resize=lowerCamelCase_, size=6_4_0, do_center_crop=lowerCamelCase_ )
lowerCamelCase__ : str = load_dataset('hf-internal-testing/fixtures_ade20k', split='test' )
lowerCamelCase__ : List[str] = Image.open(ds[0]['file'] )
lowerCamelCase__ : Tuple = image_processor(images=lowerCamelCase_, return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[str] = model(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = outputs.logits.detach().cpu()
lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_, target_sizes=[(5_0_0, 3_0_0)] )
lowerCamelCase__ : int = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape, lowerCamelCase_ )
| 316
|
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : Optional[Any] = TreeNode(int(_lowerCamelCase ) )
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
lowerCamelCase__ : Dict = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : str = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Dict = left_node
q.put(_lowerCamelCase )
lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
lowerCamelCase__ : List[str] = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : Optional[int] = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Any = right_node
q.put(_lowerCamelCase )
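    # Unreachable in practice: the loop above always returns once the user enters "n";
    # the bare raise makes that explicit rather than silently returning None.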
raise
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
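    # Standard BFS: dequeue a node, print it, then enqueue its children.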
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = n.left
        # exiting the inner while means the current node has no left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
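    # Iterative in-order: walk down the left spine pushing nodes, pop to visit, then step to the right child.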
while n or stack:
while n:
stack.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], []
lowerCamelCase__ : int = node
stacka.append(_lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
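# For example, prompt("Binary Tree Traversals") with the default width of 50
# should yield 13 asterisks, a space, the 22-character title, a space, then 13
# more asterisks, 50 characters in total.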
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A_ : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 316
| 1
|
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Tuple = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PegasusTokenizer
lowerCamelCase__ : Dict = PegasusTokenizerFast
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : Optional[Any] = True
def a__ (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : Union[str, Any] = PegasusTokenizer(lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return ("This is a test", "This is a test")
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = '</s>'
lowerCamelCase__ : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ), lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<pad>' )
self.assertEqual(vocab_keys[1], '</s>' )
self.assertEqual(vocab_keys[-1], 'v' )
self.assertEqual(len(lowerCamelCase_ ), 1_1_0_3 )
def a__ (self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_1_0_3 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowerCamelCase__ : Any = rust_tokenizer([raw_input_str], return_tensors=lowerCamelCase_, add_special_tokens=lowerCamelCase_ ).input_ids[0]
lowerCamelCase__ : Any = py_tokenizer([raw_input_str], return_tensors=lowerCamelCase_, add_special_tokens=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self._large_tokenizer
        # <mask_1> masks a whole sentence while <mask_2> masks a single word
lowerCamelCase__ : Optional[Any] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowerCamelCase__ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase__ : Union[str, Any] = tokenizer([raw_input_str], return_tensors=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase__ : List[str] = 'To ensure a smooth flow of bank resolutions.'
lowerCamelCase__ : int = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase__ : List[Any] = tokenizer([raw_input_str], return_tensors=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['This is going to be way too long.' * 1_5_0, 'short example']
lowerCamelCase__ : Optional[Any] = ['not super long but more than 5 tokens', 'tiny']
lowerCamelCase__ : Optional[int] = self._large_tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncation=lowerCamelCase_, return_tensors='pt' )
lowerCamelCase__ : Any = self._large_tokenizer(
text_target=lowerCamelCase_, max_length=5, padding=lowerCamelCase_, truncation=lowerCamelCase_, return_tensors='pt' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase_ ) == 2 # input_ids, attention_mask.
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = {'input_ids': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_, model_name='google/bigbird-pegasus-large-arxiv', revision='ba85d0851d708441f91440d509690f1ab6353415', )
@require_sentencepiece
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PegasusTokenizer
lowerCamelCase__ : List[str] = PegasusTokenizerFast
lowerCamelCase__ : Dict = True
lowerCamelCase__ : str = True
def a__ (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : Optional[Any] = PegasusTokenizer(lowerCamelCase_, offset=0, mask_token_sent=lowerCamelCase_, mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return ("This is a test", "This is a test")
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase__ : str = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[int] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowerCamelCase__ : List[str] = rust_tokenizer([raw_input_str], return_tensors=lowerCamelCase_, add_special_tokens=lowerCamelCase_ ).input_ids[0]
lowerCamelCase__ : Optional[int] = py_tokenizer([raw_input_str], return_tensors=lowerCamelCase_, add_special_tokens=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ['This is going to be way too long.' * 1_0_0_0, 'short example']
lowerCamelCase__ : Tuple = ['not super long but more than 5 tokens', 'tiny']
lowerCamelCase__ : Optional[Any] = self._large_tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncation=lowerCamelCase_, return_tensors='pt' )
lowerCamelCase__ : Any = self._large_tokenizer(
text_target=lowerCamelCase_, max_length=5, padding=lowerCamelCase_, truncation=lowerCamelCase_, return_tensors='pt' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase_ ) == 2 # input_ids, attention_mask.
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowerCamelCase__ : List[Any] = self._large_tokenizer(lowerCamelCase_ ).input_ids
self.assertListEqual(
lowerCamelCase_, [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1], )
| 316
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A_ : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ : List[Any] = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
A_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A_ : str = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def lowerCamelCase_ ( _lowerCamelCase ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCamelCase__ : List[Any] = dict((re.sub(r'@@$' , '' , _lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _lowerCamelCase ), v) for k, v in d.items() )
lowerCamelCase__ : int = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
lowerCamelCase__ : List[str] = d[k] # restore
return da
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
    # note: since the model dump is old, fairseq upgraded its model format some
    # time later, and it performs a lot of rewrites and splits on the saved
    # weights; therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 316
| 1
|
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = flatten_dict(_lowerCamelCase )
return flax_params
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Dict = {}
lowerCamelCase__ : Optional[Any] = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
lowerCamelCase__ : Union[str, Any] = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowerCamelCase__ : Union[str, Any] = '.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowerCamelCase__ : int = new_key.replace(_lowerCamelCase , _lowerCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowerCamelCase__ : Optional[Any] = new_key.replace(_lowerCamelCase , _lowerCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowerCamelCase__ : List[Any] = re.sub(r'layers_(\d+)' , r'layer.\1' , _lowerCamelCase )
lowerCamelCase__ : Optional[Any] = new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowerCamelCase__ : int = re.sub(r'layers_(\d+)' , r'layer.\1' , _lowerCamelCase )
lowerCamelCase__ : Any = flax_dict[key]
lowerCamelCase__ : List[str] = {}
# convert converted_dict into torch format
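    # (Flax stores Dense kernels as (in_features, out_features) while torch's
    # nn.Linear expects (out_features, in_features); hence the transpose below
    # for everything except embedding tables, which keep their layout.)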
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowerCamelCase__ : Dict = torch.from_numpy(converted_dict[key].T )
else:
lowerCamelCase__ : Union[str, Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False ):
lowerCamelCase__ : List[Any] = get_flax_param(_lowerCamelCase )
if not use_large:
lowerCamelCase__ : Optional[Any] = PixaStructVisionConfig()
lowerCamelCase__ : Dict = PixaStructTextConfig()
else:
lowerCamelCase__ : Optional[Any] = PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
lowerCamelCase__ : Union[str, Any] = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
lowerCamelCase__ : Optional[Any] = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_lowerCamelCase )
lowerCamelCase__ : Any = PixaStructForConditionalGeneration(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = rename_and_convert_flax_params(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
lowerCamelCase__ : Tuple = PixaStructImageProcessor()
lowerCamelCase__ : Tuple = PixaStructProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
if use_large:
lowerCamelCase__ : Union[str, Any] = 4096
lowerCamelCase__ : int = True
# mkdir if needed
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
print('Model saved in {}'.format(_lowerCamelCase ) )
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
A_ : List[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 316
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if len(_lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(_lowerCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
lowerCamelCase__ : Optional[int] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCamelCase ) )
]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCamelCase ) )
]
def lowerCamelCase_ ( _lowerCamelCase ):
if len(_lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
lowerCamelCase__ : Union[str, Any] = len(_lowerCamelCase )
lowerCamelCase__ : Any = matrix_length // 2
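    # Slice out the four equally sized quadrants; e.g. a 4x4 input yields four 2x2 blocks.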
lowerCamelCase__ : Tuple = [[a[i][j] for j in range(_lowerCamelCase , _lowerCamelCase )] for i in range(_lowerCamelCase )]
lowerCamelCase__ : str = [
[a[i][j] for j in range(_lowerCamelCase , _lowerCamelCase )] for i in range(_lowerCamelCase , _lowerCamelCase )
]
lowerCamelCase__ : Dict = [[a[i][j] for j in range(_lowerCamelCase )] for i in range(_lowerCamelCase )]
lowerCamelCase__ : List[str] = [[a[i][j] for j in range(_lowerCamelCase )] for i in range(_lowerCamelCase , _lowerCamelCase )]
return top_left, top_right, bot_left, bot_right
def lowerCamelCase_ ( _lowerCamelCase ):
return len(_lowerCamelCase ), len(matrix[0] )
def lowerCamelCase_ ( _lowerCamelCase ):
print('\n'.join(str(line ) for line in matrix ) )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if matrix_dimensions(_lowerCamelCase ) == (2, 2):
return default_matrix_multiplication(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = split_matrix(_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = split_matrix(_lowerCamelCase )
lowerCamelCase__ : Dict = actual_strassen(_lowerCamelCase , matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : Tuple = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
lowerCamelCase__ : Optional[int] = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
lowerCamelCase__ : int = actual_strassen(_lowerCamelCase , matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : Any = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : Any = actual_strassen(matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : List[str] = actual_strassen(matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : int = matrix_addition(matrix_subtraction(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) , _lowerCamelCase )
lowerCamelCase__ : Dict = matrix_addition(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : List[Any] = matrix_addition(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Dict = matrix_subtraction(matrix_subtraction(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) , _lowerCamelCase )
# construct the new matrix from our 4 quadrants
lowerCamelCase__ : Optional[int] = []
for i in range(len(_lowerCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(_lowerCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if matrix_dimensions(_lowerCamelCase )[1] != matrix_dimensions(_lowerCamelCase )[0]:
lowerCamelCase__ : List[Any] = (
'Unable to multiply these matrices, please check the dimensions.\n'
f'''Matrix A: {matrixa}\n'''
f'''Matrix B: {matrixa}'''
)
raise Exception(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = matrix_dimensions(_lowerCamelCase )
lowerCamelCase__ : Dict = matrix_dimensions(_lowerCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
lowerCamelCase__ : int = max(*_lowerCamelCase , *_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = int(math.pow(2 , math.ceil(math.loga(_lowerCamelCase ) ) ) )
lowerCamelCase__ : Union[str, Any] = matrixa
lowerCamelCase__ : Union[str, Any] = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , _lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
lowerCamelCase__ : List[str] = actual_strassen(_lowerCamelCase , _lowerCamelCase )
# Removing the additional zeros
for i in range(0 , _lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
A_ : Dict = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
A_ : Union[str, Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
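# Illustrative sketch (not from this file): a self-contained version of the
# 2x2 base case that the Strassen recursion above bottoms out in, checked
# against a hand-computed product.
def multiply_2x2(a, b):
    return [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]

assert multiply_2x2([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]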
| 316
|
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
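# A minimal readable sketch of the same complement logic; the function name
# below is made up for this illustration.
import re

def complement(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(str.maketrans("ATCG", "TAGC"))

assert complement("ATCG") == "TAGC"
assert complement("GTAT") == "CATA"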
| 316
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : List[str] = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'van'
def __init__(self, lowerCamelCase_=2_2_4, lowerCamelCase_=3, lowerCamelCase_=[7, 3, 3, 3], lowerCamelCase_=[4, 2, 2, 2], lowerCamelCase_=[6_4, 1_2_8, 3_2_0, 5_1_2], lowerCamelCase_=[3, 3, 1_2, 3], lowerCamelCase_=[8, 8, 4, 4], lowerCamelCase_="gelu", lowerCamelCase_=0.02, lowerCamelCase_=1e-6, lowerCamelCase_=1e-2, lowerCamelCase_=0.0, lowerCamelCase_=0.0, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = image_size
lowerCamelCase__ : Optional[Any] = num_channels
lowerCamelCase__ : List[str] = patch_sizes
lowerCamelCase__ : int = strides
lowerCamelCase__ : Optional[int] = hidden_sizes
lowerCamelCase__ : Optional[Any] = depths
lowerCamelCase__ : List[str] = mlp_ratios
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Optional[Any] = initializer_range
lowerCamelCase__ : List[Any] = layer_norm_eps
lowerCamelCase__ : Any = layer_scale_init_value
lowerCamelCase__ : Dict = drop_path_rate
lowerCamelCase__ : Union[str, Any] = dropout_rate
| 316
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = s.rsplit(_lowerCamelCase , _lowerCamelCase )
return new.join(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Any = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ : Union[str, Any] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase__ : Dict = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
lowerCamelCase__ : str = rreplace(_lowerCamelCase , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
lowerCamelCase__ : Optional[Any] = rreplace(_lowerCamelCase , '.b' , '.bias' , 1 )
lowerCamelCase__ : int = value.float()
return upgrade
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True ):
from dall_e import Encoder
lowerCamelCase__ : List[str] = Encoder()
if os.path.exists(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = torch.load(_lowerCamelCase )
else:
lowerCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = ckpt.state_dict()
encoder.load_state_dict(_lowerCamelCase )
if config_path is not None:
lowerCamelCase__ : Union[str, Any] = FlavaImageCodebookConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : Dict = FlavaImageCodebookConfig()
lowerCamelCase__ : Tuple = FlavaImageCodebook(_lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = encoder.state_dict()
lowerCamelCase__ : Any = upgrade_state_dict(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = hf_model.state_dict()
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = 2
lowerCamelCase__ : List[str] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_lowerCamelCase )
if n > 1:
factors.append(_lowerCamelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
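# Trial-division factorization as above, with a worked example; the helper
# name is made up for this sketch.
def prime_factors(n: int) -> list[int]:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors

assert prime_factors(315) == [3, 3, 5, 7]  # 3 * 3 * 5 * 7 = 315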
| 316
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=2, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = scope
lowerCamelCase__ : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = num_patches + 2
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFDeiTModel(config=lowerCamelCase_ )
lowerCamelCase__ : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
lowerCamelCase__ : Any = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Optional[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Any = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFDeiTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, tf.keras.layers.Dense ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
lowerCamelCase__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCamelCase__ : List[Any] = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : Tuple = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
| 316
| 1
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
A_ : Optional[Any] = logging.getLogger(__name__)
def lowerCamelCase_ ( ):
lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=_lowerCamelCase , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=_lowerCamelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=_lowerCamelCase , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=_lowerCamelCase , default='data/dump' , help='The dump file prefix.' )
lowerCamelCase__ : Dict = parser.parse_args()
logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
lowerCamelCase__ : Any = BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase__ : Any = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
lowerCamelCase__ : str = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCamelCase__ : List[str] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase__ : Any = tokenizer.special_tokens_map['cls_token'] # `<s>`
lowerCamelCase__ : Union[str, Any] = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCamelCase__ : str = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase__ : Optional[Any] = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
lowerCamelCase__ : Tuple = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f'''Loading text from {args.file_path}''' )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
lowerCamelCase__ : str = fp.readlines()
logger.info('Start encoding' )
logger.info(f'''{len(_lowerCamelCase )} examples to process.''' )
lowerCamelCase__ : int = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Union[str, Any] = 1_0000
lowerCamelCase__ : Tuple = time.time()
for text in data:
lowerCamelCase__ : Optional[Any] = f'''{bos} {text.strip()} {sep}'''
lowerCamelCase__ : Union[str, Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
rslt.append(_lowerCamelCase )
iter += 1
if iter % interval == 0:
lowerCamelCase__ : Union[str, Any] = time.time()
logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
lowerCamelCase__ : List[Any] = time.time()
logger.info('Finished binarization' )
logger.info(f'''{len(_lowerCamelCase )} examples processed.''' )
lowerCamelCase__ : Any = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
lowerCamelCase__ : Dict = tokenizer.vocab_size
if vocab_size < (1 << 16):
lowerCamelCase__ : List[Any] = [np.uintaa(_lowerCamelCase ) for d in rslt]
else:
lowerCamelCase__ : str = [np.intaa(_lowerCamelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'''Dump to {dp_file}''' )
with open(_lowerCamelCase , 'wb' ) as handle:
pickle.dump(rslt_ , _lowerCamelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
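# Illustrative note on the dtype choice above: token ids are stored as uint16
# whenever the vocabulary fits in 16 bits, halving memory versus int32. A
# quick check of that threshold with an example vocabulary size.
import numpy as np

vocab_size = 30_522  # e.g. bert-base-uncased
dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
assert dtype == np.uint16 and int(np.uint16(vocab_size)) == vocab_size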
| 316
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
while second != 0:
lowerCamelCase__ : Tuple = first & second
first ^= second
lowerCamelCase__ : int = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = int(input("Enter the first number: ").strip())
A_ : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
| 316
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Optional[int] = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
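# Hedged illustration: these _LazyModule __init__ files defer the heavy
# framework imports until first attribute access. Observable behavior,
# assuming transformers is installed with torch available:
from transformers.models import encoder_decoder

cls = encoder_decoder.EncoderDecoderModel  # the torch-backed import happens here
print(cls.__name__)  # EncoderDecoderModel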
| 316
| 1
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCamelCase_ ( _lowerCamelCase ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCamelCase_ ( ):
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCamelCase__ : Dict = [1, 2, 3]
with pytest.raises(_lowerCamelCase ):
with parallel_backend('unsupported backend' ):
map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=2 )
with pytest.raises(_lowerCamelCase ):
with parallel_backend('unsupported backend' ):
map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = [1, 2]
lowerCamelCase__ : Tuple = {'a': 1, 'b': 2}
lowerCamelCase__ : List[Any] = {'a': [1, 2], 'b': [3, 4]}
lowerCamelCase__ : Optional[int] = {'a': {'1': 1}, 'b': 2}
lowerCamelCase__ : List[Any] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
lowerCamelCase__ : List[str] = [2, 3]
lowerCamelCase__ : List[Any] = {'a': 2, 'b': 3}
lowerCamelCase__ : Optional[Any] = {'a': [2, 3], 'b': [4, 5]}
lowerCamelCase__ : Any = {'a': {'1': 2}, 'b': 3}
lowerCamelCase__ : List[Any] = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
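# Outside any parallel backend, map_nested simply applies the function over
# nested containers sequentially; a standalone check, assuming the datasets
# library is installed.
from datasets.utils.py_utils import map_nested

assert map_nested(lambda x: x + 1, {"a": [1, 2], "b": [3, 4]}) == {"a": [2, 3], "b": [4, 5]}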
| 316
|
"""simple docstring"""
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
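# The expression above is the algebraic identity tanh(x) = 2 / (1 + e^(-2x)) - 1;
# a quick numerical check against numpy's built-in.
import numpy as np

x = np.array([-1.0, 0.0, 2.5])
assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))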
| 316
| 1
|
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Dict = np.inf
def set_batch_size(_lowerCamelCase ) -> None:
nonlocal batch_size
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary":
lowerCamelCase__ : str = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_lowerCamelCase , _lowerCamelCase )
return None if batch_size is np.inf else batch_size
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = False, lowerCamelCase_ = False, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
lowerCamelCase_, split=lowerCamelCase_, features=lowerCamelCase_, cache_dir=lowerCamelCase_, keep_in_memory=lowerCamelCase_, streaming=lowerCamelCase_, num_proc=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : str = path_or_paths if isinstance(lowerCamelCase_, lowerCamelCase_ ) else {self.split: path_or_paths}
lowerCamelCase__ : Optional[Any] = _PACKAGED_DATASETS_MODULES['parquet'][1]
lowerCamelCase__ : List[Any] = Parquet(
cache_dir=lowerCamelCase_, data_files=lowerCamelCase_, features=lowerCamelCase_, hash=lowerCamelCase_, **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
if self.streaming:
lowerCamelCase__ : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowerCamelCase__ : str = None
lowerCamelCase__ : str = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_, download_mode=lowerCamelCase_, verification_mode=lowerCamelCase_, base_path=lowerCamelCase_, num_proc=self.num_proc, )
lowerCamelCase__ : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase_, in_memory=self.keep_in_memory )
return dataset
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = dataset
lowerCamelCase__ : List[Any] = path_or_buf
lowerCamelCase__ : Tuple = batch_size or get_writer_batch_size(dataset.features )
lowerCamelCase__ : str = parquet_writer_kwargs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike) ):
with open(self.path_or_buf, 'wb+' ) as buffer:
lowerCamelCase__ : List[str] = self._write(file_obj=lowerCamelCase_, batch_size=lowerCamelCase_, **self.parquet_writer_kwargs )
else:
lowerCamelCase__ : int = self._write(file_obj=self.path_or_buf, batch_size=lowerCamelCase_, **self.parquet_writer_kwargs )
return written
def a__ (self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 0
lowerCamelCase__ : List[str] = parquet_writer_kwargs.pop('path_or_buf', lowerCamelCase_ )
lowerCamelCase__ : str = self.dataset.features.arrow_schema
lowerCamelCase__ : str = pq.ParquetWriter(lowerCamelCase_, schema=lowerCamelCase_, **lowerCamelCase_ )
for offset in logging.tqdm(
range(0, len(self.dataset ), lowerCamelCase_ ), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format', ):
lowerCamelCase__ : List[Any] = query_table(
table=self.dataset._data, key=slice(lowerCamelCase_, offset + batch_size ), indices=self.dataset._indices if self.dataset._indices is not None else None, )
writer.write_table(lowerCamelCase_ )
written += batch.nbytes
writer.close()
return written
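# Hedged sketch of a typical round-trip through the reader/writer above via
# the public datasets API that wraps them; the file path is made up.
from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3]})
ds.to_parquet("tmp.parquet")                    # parquet writer under the hood
reloaded = Dataset.from_parquet("tmp.parquet")  # parquet reader under the hood
assert reloaded["a"] == [1, 2, 3]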
| 316
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 316
| 1
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase = 100_0000 ):
lowerCamelCase__ : Optional[int] = 1
lowerCamelCase__ : List[Any] = 1
lowerCamelCase__ : List[Any] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
lowerCamelCase__ : int = 0
lowerCamelCase__ : Dict = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
lowerCamelCase__ : Union[str, Any] = (3 * number) + 1
counter += 1
if inputa not in counters:
lowerCamelCase__ : int = counter
if counter > pre_counter:
lowerCamelCase__ : List[str] = inputa
lowerCamelCase__ : str = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
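# The solver above memoizes Collatz chain lengths across inputs; the same idea
# for a single value using the standard library cache.
from functools import lru_cache

@lru_cache(maxsize=None)
def collatz_length(n: int) -> int:
    if n == 1:
        return 1
    return 1 + collatz_length(n // 2 if n % 2 == 0 else 3 * n + 1)

assert collatz_length(13) == 10  # 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1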
| 316
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ : int = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
A_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = SwinConfig()
lowerCamelCase__ : List[str] = swin_name.split('_' )
lowerCamelCase__ : Tuple = name_split[1]
lowerCamelCase__ : List[str] = int(name_split[4] )
lowerCamelCase__ : List[Any] = int(name_split[3][-1] )
if model_size == "tiny":
lowerCamelCase__ : Union[str, Any] = 96
lowerCamelCase__ : Any = (2, 2, 6, 2)
lowerCamelCase__ : Union[str, Any] = (3, 6, 12, 24)
elif model_size == "small":
lowerCamelCase__ : List[Any] = 96
lowerCamelCase__ : List[Any] = (2, 2, 18, 2)
lowerCamelCase__ : Any = (3, 6, 12, 24)
elif model_size == "base":
lowerCamelCase__ : Union[str, Any] = 128
lowerCamelCase__ : List[Any] = (2, 2, 18, 2)
lowerCamelCase__ : Union[str, Any] = (4, 8, 16, 32)
else:
lowerCamelCase__ : int = 192
lowerCamelCase__ : List[str] = (2, 2, 18, 2)
lowerCamelCase__ : str = (6, 12, 24, 48)
if "in22k" in swin_name:
lowerCamelCase__ : Tuple = 2_1841
else:
lowerCamelCase__ : List[Any] = 1000
lowerCamelCase__ : Optional[Any] = 'huggingface/label-files'
lowerCamelCase__ : Dict = 'imagenet-1k-id2label.json'
lowerCamelCase__ : Tuple = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Dict = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Union[str, Any] = idalabel
lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : List[str] = img_size
lowerCamelCase__ : Tuple = num_classes
lowerCamelCase__ : Optional[Any] = embed_dim
lowerCamelCase__ : List[Any] = depths
lowerCamelCase__ : Optional[int] = num_heads
lowerCamelCase__ : Optional[int] = window_size
return config
def lowerCamelCase_ ( _lowerCamelCase ):
if "patch_embed.proj" in name:
lowerCamelCase__ : str = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCamelCase__ : Any = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
lowerCamelCase__ : Any = 'encoder.' + name
if "attn.proj" in name:
lowerCamelCase__ : Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCamelCase__ : List[Any] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCamelCase__ : Tuple = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCamelCase__ : str = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCamelCase__ : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCamelCase__ : Optional[int] = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
lowerCamelCase__ : Any = 'layernorm.weight'
if name == "norm.bias":
lowerCamelCase__ : str = 'layernorm.bias'
if "head" in name:
lowerCamelCase__ : str = name.replace('head' , 'classifier' )
else:
lowerCamelCase__ : List[str] = 'swin.' + name
return name
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : int = orig_state_dict.pop(_lowerCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
lowerCamelCase__ : Optional[int] = key.split('.' )
lowerCamelCase__ : Any = int(key_split[1] )
lowerCamelCase__ : List[str] = int(key_split[3] )
lowerCamelCase__ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCamelCase__ : Tuple = val[:dim, :]
lowerCamelCase__ : int = val[
dim : dim * 2, :
]
lowerCamelCase__ : Optional[Any] = val[-dim:, :]
else:
lowerCamelCase__ : List[Any] = val[
:dim
]
lowerCamelCase__ : List[Any] = val[
dim : dim * 2
]
lowerCamelCase__ : Optional[Any] = val[
-dim:
]
else:
lowerCamelCase__ : Union[str, Any] = val
return orig_state_dict
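# The qkv branch above slices timm's fused projection into query/key/value
# thirds along the first axis; a minimal torch illustration where dim plays
# the role of all_head_size.
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
query, key, value = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
assert torch.equal(torch.cat([query, key, value]), qkv_weight)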
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
lowerCamelCase__ : int = get_swin_config(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = SwinForImageClassification(_lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = convert_state_dict(timm_model.state_dict() , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : Tuple = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
lowerCamelCase__ : Any = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
lowerCamelCase__ : List[str] = image_processor(images=_lowerCamelCase , return_tensors='pt' )
lowerCamelCase__ : Any = timm_model(inputs['pixel_values'] )
lowerCamelCase__ : Optional[Any] = model(**_lowerCamelCase ).logits
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A_ : List[str] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 316
|
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
A_ : List[Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class a_ ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ : Optional[datasets.Features] = None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , ):
import pyspark
def generate_fn():
lowerCamelCase__ : Optional[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
lowerCamelCase__ : Dict = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
lowerCamelCase__ : Dict = partition_df.collect()
lowerCamelCase__ : int = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class a_ ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = df
lowerCamelCase__ : Any = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCamelCase__ : List[Any] = _generate_iterable_examples(self.df, self.partition_order )
def __iter__(self ):
'''simple docstring'''
yield from self.generate_examples_fn()
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.split_shard_indices_by_worker(lowerCamelCase_, lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
return len(self.partition_order )
class a_ ( datasets.DatasetBuilder ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = SparkConfig
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase__ : Optional[Any] = df
lowerCamelCase__ : Dict = working_dir
super().__init__(
cache_dir=lowerCamelCase_, config_name=str(self.df.semanticHash() ), **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
def create_cache_and_write_probe(lowerCamelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir, exist_ok=lowerCamelCase_ )
lowerCamelCase__ : str = os.path.join(self._cache_dir, 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowerCamelCase_, 'a' )
return [probe_file]
if self._spark.conf.get('spark.master', '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase__ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ), 1 ).mapPartitions(lowerCamelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def a__ (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(lowerCamelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
lowerCamelCase__ : List[Any] = self.df.count()
lowerCamelCase__ : List[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase__ : List[Any] = (
self.df.limit(lowerCamelCase_ )
.repartition(1 )
.mapInArrow(lowerCamelCase_, 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase__ : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase__ : str = min(lowerCamelCase_, int(approx_total_size / max_shard_size ) )
lowerCamelCase__ : List[Any] = self.df.repartition(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : List[str] = ParquetWriter if file_format == 'parquet' else ArrowWriter
lowerCamelCase__ : List[str] = os.path.join(self._working_dir, os.path.basename(lowerCamelCase_ ) ) if self._working_dir else fpath
lowerCamelCase__ : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase__ : int = self.config.features
lowerCamelCase__ : Dict = self._writer_batch_size
lowerCamelCase__ : Optional[Any] = self._fs.storage_options
def write_arrow(lowerCamelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase__ : Any = pyspark.TaskContext().taskAttemptId()
lowerCamelCase__ : str = next(lowerCamelCase_, lowerCamelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], )
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Any = writer_class(
features=lowerCamelCase_, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : List[str] = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCamelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
shard_id += 1
lowerCamelCase__ : Dict = writer_class(
features=writer._features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : Tuple = pa.Table.from_batches([batch] )
writer.write_table(lowerCamelCase_ )
if writer._num_bytes > 0:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ):
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(lowerCamelCase_ ), os.path.basename(lowerCamelCase_ ) )
shutil.move(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = (
self.df.mapInArrow(lowerCamelCase_, 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
self._validate_cache_dir()
lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
lowerCamelCase__ : str = not is_remote_filesystem(self._fs )
lowerCamelCase__ : Any = os.path.join if is_local else posixpath.join
lowerCamelCase__ : Any = '-TTTTT-SSSSS-of-NNNNN'
lowerCamelCase__ : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowerCamelCase__ : Union[str, Any] = path_join(self._output_dir, lowerCamelCase_ )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[str] = []
for task_id, content in self._prepare_split_single(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase_ )
lowerCamelCase__ : str = total_num_examples
lowerCamelCase__ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowerCamelCase__ : Union[str, Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase__ : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
rename(
lowerCamelCase_, fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''' ).replace('NNNNN', f'''{total_shards:05d}''' ), )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = 0
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ , lowerCamelCase__ : Any = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase_, len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Dict = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace(lowerCamelCase_, '' ), )
def a__ (self, lowerCamelCase_, ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 316
| 1
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = {}
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=1 ):
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCamelCase__ : Union[str, Any] = [[w, v]]
if not self.graph.get(lowerCamelCase_ ):
lowerCamelCase__ : int = []
def a__ (self ):
'''simple docstring'''
return list(self.graph )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase_ )
def a__ (self, lowerCamelCase_=-2, lowerCamelCase_=-1 ):
'''simple docstring'''
if s == d:
return []
lowerCamelCase__ : str = []
lowerCamelCase__ : List[str] = []
if s == -2:
lowerCamelCase__ : Dict = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase__ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase__ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase_ ) != 0:
lowerCamelCase__ : Union[str, Any] = stack[len(lowerCamelCase_ ) - 1]
else:
lowerCamelCase__ : Tuple = ss
# check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return visited
def a__ (self, lowerCamelCase_=-1 ):
'''simple docstring'''
if c == -1:
lowerCamelCase__ : int = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(lowerCamelCase_ ):
# every vertex has at most 102 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
lowerCamelCase__ : List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase_, lowerCamelCase_, 1 )
def a__ (self, lowerCamelCase_=-2 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = deque()
lowerCamelCase__ : Dict = []
if s == -2:
lowerCamelCase__ : Optional[int] = list(self.graph )[0]
d.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
while d:
lowerCamelCase__ : Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return len(self.graph[u] )
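# Iterative topological sort: a node is appended to sorted_nodes only once all of
# its children have been visited, so the result lists nodes in reverse dependency
# order. This assumes the graph is acyclic; on a cyclic graph the output is not a
# valid topological order.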
def a__ (self, lowerCamelCase_=-2 ):
'''simple docstring'''
lowerCamelCase__ : Any = []
lowerCamelCase__ : List[str] = []
if s == -2:
lowerCamelCase__ : Optional[int] = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
lowerCamelCase__ : Dict = s
lowerCamelCase__ : List[str] = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase__ : Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase__ : str = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCamelCase_ ) != 0:
lowerCamelCase__ : List[str] = stack[len(lowerCamelCase_ ) - 1]
else:
lowerCamelCase__ : List[Any] = ss
# check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return sorted_nodes
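# Cycle detection by DFS back-edges: when an already-visited node (other than the
# immediate parent) is reached while not backtracking, every node still on the
# stack above it belongs to a cycle and is collected in anticipating_nodes.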
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
lowerCamelCase__ : str = []
lowerCamelCase__ : Tuple = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
lowerCamelCase__ : Tuple = -2
lowerCamelCase__ : Any = []
lowerCamelCase__ : List[str] = s
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase__ : int = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase__ : Optional[int] = len(lowerCamelCase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase__ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase__ : Optional[int] = True
if len(lowerCamelCase_ ) != 0:
lowerCamelCase__ : List[Any] = stack[len(lowerCamelCase_ ) - 1]
else:
lowerCamelCase__ : Union[str, Any] = False
indirect_parents.append(lowerCamelCase_ )
lowerCamelCase__ : str = s
lowerCamelCase__ : Any = ss
# check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return list(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = []
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : str = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = -2
lowerCamelCase__ : str = []
lowerCamelCase__ : Optional[Any] = s
lowerCamelCase__ : Any = False
lowerCamelCase__ : str = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase__ : List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase__ : Optional[int] = len(lowerCamelCase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase__ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase__ : Dict = True
if len(lowerCamelCase_ ) != 0:
lowerCamelCase__ : Tuple = stack[len(lowerCamelCase_ ) - 1]
else:
lowerCamelCase__ : Tuple = False
indirect_parents.append(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = s
lowerCamelCase__ : Any = ss
# check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return False
def a__ (self, lowerCamelCase_=-2, lowerCamelCase_=-1 ):
'''simple docstring'''
lowerCamelCase__ : str = time()
self.dfs(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = time()
return end - begin
def a__ (self, lowerCamelCase_=-2 ):
'''simple docstring'''
lowerCamelCase__ : List[str] = time()
self.bfs(lowerCamelCase_ )
lowerCamelCase__ : str = time()
return end - begin
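# Undirected variant of the graph above: add_pair stores the edge in both
# adjacency lists, and remove_pair deletes both directions.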
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = {}
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=1 ):
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCamelCase__ : Optional[int] = [[w, v]]
# add the other way
if self.graph.get(lowerCamelCase_ ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
lowerCamelCase__ : List[Any] = [[w, u]]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase_ )
# the other way round
if self.graph.get(lowerCamelCase_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCamelCase_ )
def a__ (self, lowerCamelCase_=-2, lowerCamelCase_=-1 ):
'''simple docstring'''
if s == d:
return []
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : Dict = []
if s == -2:
lowerCamelCase__ : List[Any] = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase__ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase__ : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase_ ) != 0:
lowerCamelCase__ : str = stack[len(lowerCamelCase_ ) - 1]
else:
lowerCamelCase__ : Optional[Any] = ss
# check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return visited
def a__ (self, lowerCamelCase_=-1 ):
'''simple docstring'''
if c == -1:
lowerCamelCase__ : Optional[int] = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(lowerCamelCase_ ):
# every vertex gets up to 102 edges (duplicates and self-loops are skipped)
for _ in range(floor(random() * 1_0_2 ) + 1 ):
lowerCamelCase__ : int = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase_, lowerCamelCase_, 1 )
def a__ (self, lowerCamelCase_=-2 ):
'''simple docstring'''
lowerCamelCase__ : List[str] = deque()
lowerCamelCase__ : Any = []
if s == -2:
lowerCamelCase__ : Tuple = list(self.graph )[0]
d.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
while d:
lowerCamelCase__ : Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return len(self.graph[u] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = []
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[Any] = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
lowerCamelCase__ : Dict = -2
lowerCamelCase__ : str = []
lowerCamelCase__ : List[str] = s
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : int = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase__ : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase__ : int = len(lowerCamelCase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase__ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase__ : Tuple = True
if len(lowerCamelCase_ ) != 0:
lowerCamelCase__ : Any = stack[len(lowerCamelCase_ ) - 1]
else:
lowerCamelCase__ : List[Any] = False
indirect_parents.append(lowerCamelCase_ )
lowerCamelCase__ : Tuple = s
lowerCamelCase__ : Union[str, Any] = ss
# check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return list(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = []
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : Dict = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = -2
lowerCamelCase__ : int = []
lowerCamelCase__ : List[Any] = s
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Union[str, Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase__ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase__ : int = len(lowerCamelCase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase__ : Tuple = True
if len(lowerCamelCase_ ) != 0:
lowerCamelCase__ : Dict = stack[len(lowerCamelCase_ ) - 1]
else:
lowerCamelCase__ : Any = False
indirect_parents.append(lowerCamelCase_ )
lowerCamelCase__ : str = s
lowerCamelCase__ : List[Any] = ss
# check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return False
def a__ (self ):
'''simple docstring'''
return list(self.graph )
def a__ (self, lowerCamelCase_=-2, lowerCamelCase_=-1 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = time()
self.dfs(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Any = time()
return end - begin
def a__ (self, lowerCamelCase_=-2 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = time()
self.bfs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = time()
return end - begin
| 316
|
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = len(lowerCamelCase_ )
lowerCamelCase__ : Any = [0] * len_array
if len_array > 0:
lowerCamelCase__ : Union[str, Any] = array[0]
for i in range(1, lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = self.prefix_sum[i - 1] + array[i]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
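# contains_sum checks in O(n) whether any contiguous subarray sums to target_sum:
# a subarray ending at index j has that sum exactly when prefix_sum[j] - target_sum
# equals an earlier prefix sum, so one pass with a set of seen prefix sums
# (seeded with 0 for subarrays starting at index 0) suffices.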
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowerCamelCase_ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_4, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=3_2, lowerCamelCase_=4, lowerCamelCase_=4, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=0.02, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : str = batch_size
lowerCamelCase__ : str = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : Any = use_input_mask
lowerCamelCase__ : Optional[int] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Union[str, Any] = rotary_dim
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : int = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : Optional[Any] = vocab_size - 1
lowerCamelCase__ : List[Any] = vocab_size - 1
lowerCamelCase__ : Union[str, Any] = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Tuple = None
if self.use_input_mask:
lowerCamelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=lowerCamelCase_, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
return (config, input_ids, input_mask)
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
lowerCamelCase__ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
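# The cache checks below prime past_key_values with all but the last token, feed
# the final token through the cached path, and assert that the logits match a
# plain full-sequence forward pass within 1e-3.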
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 2_0
lowerCamelCase__ : List[str] = model_class_name(lowerCamelCase_ )
lowerCamelCase__ : Tuple = model.init_cache(input_ids.shape[0], lowerCamelCase_ )
lowerCamelCase__ : int = jnp.ones((input_ids.shape[0], max_decoder_length), dtype='i4' )
lowerCamelCase__ : List[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCamelCase__ : List[Any] = model(
input_ids[:, :-1], attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, position_ids=lowerCamelCase_, )
lowerCamelCase__ : Optional[int] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='i4' )
lowerCamelCase__ : Any = model(
input_ids[:, -1:], attention_mask=lowerCamelCase_, past_key_values=outputs_cache.past_key_values, position_ids=lowerCamelCase_, )
lowerCamelCase__ : Dict = model(lowerCamelCase_ )
lowerCamelCase__ : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = 2_0
lowerCamelCase__ : Dict = model_class_name(lowerCamelCase_ )
lowerCamelCase__ : Any = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )], axis=-1, )
lowerCamelCase__ : List[str] = model.init_cache(input_ids.shape[0], lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCamelCase__ : Optional[int] = model(
input_ids[:, :-1], attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, position_ids=lowerCamelCase_, )
lowerCamelCase__ : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='i4' )
lowerCamelCase__ : Any = model(
input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=lowerCamelCase_, position_ids=lowerCamelCase_, )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' )
@require_flax
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowerCamelCase__ : List[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = FlaxGPTJModelTester(self )
def a__ (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
@tooslow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTaTokenizer.from_pretrained('gpt2', pad_token='<|endoftext|>', padding_side='left' )
lowerCamelCase__ : Any = tokenizer(['Hello this is a long string', 'Hey'], return_tensors='np', padding=lowerCamelCase_, truncation=lowerCamelCase_ )
lowerCamelCase__ : str = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = model.config.eos_token_id
lowerCamelCase__ : str = jax.jit(model.generate )
lowerCamelCase__ : str = jit_generate(
inputs['input_ids'], attention_mask=inputs['attention_mask'], pad_token_id=tokenizer.pad_token_id ).sequences
lowerCamelCase__ : int = tokenizer.batch_decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : int = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
@is_pt_flax_cross_test
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase__ : int = self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__ : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__ : Dict = getattr(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : Dict = pt_inputs['input_ids'].shape
lowerCamelCase__ : Union[str, Any] = np.random.randint(0, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Optional[Any] = pt_model_class(lowerCamelCase_ ).eval()
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_, dtype=jnp.floataa )
lowerCamelCase__ : Any = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase_ )
lowerCamelCase__ : Tuple = fx_state
with torch.no_grad():
lowerCamelCase__ : Any = pt_model(**lowerCamelCase_ ).to_tuple()
lowerCamelCase__ : Union[str, Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ), 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase_, lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = model_class.from_pretrained(lowerCamelCase_, from_pt=lowerCamelCase_ )
lowerCamelCase__ : Any = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ), len(lowerCamelCase_ ), 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowerCamelCase_, lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2 )
@is_pt_flax_cross_test
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase__ : Tuple = self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__ : Optional[int] = getattr(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
lowerCamelCase__ : Optional[Any] = model_class(lowerCamelCase_, dtype=jnp.floataa )
lowerCamelCase__ : Union[str, Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_, fx_model.params )
lowerCamelCase__ , lowerCamelCase__ : List[str] = pt_inputs['input_ids'].shape
lowerCamelCase__ : Any = np.random.randint(0, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
lowerCamelCase__ : str = 0
lowerCamelCase__ : Optional[int] = 1
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : Dict = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = pt_model(**lowerCamelCase_ ).to_tuple()
lowerCamelCase__ : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ), 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase_, lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = pt_model_class.from_pretrained(lowerCamelCase_, from_flax=lowerCamelCase_ )
with torch.no_grad():
lowerCamelCase__ : Any = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ), len(lowerCamelCase_ ), 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase_, lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 )
@tooslow
def a__ (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ : int = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
lowerCamelCase__ : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
| 316
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['image_processor', 'tokenizer']
lowerCamelCase__ : Optional[int] = 'CLIPImageProcessor'
lowerCamelCase__ : List[str] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : int = kwargs.pop('feature_extractor' )
lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
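# __call__ mirrors CLIPProcessor: text is routed to the tokenizer and images to
# the image processor; when both are given, the pixel values are attached to the
# text encoding so a single BatchEncoding carries all model inputs.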
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase__ : Any = self.tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if images is not None:
lowerCamelCase__ : List[Any] = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if text is not None and images is not None:
lowerCamelCase__ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ), tensor_type=lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer.model_input_names
lowerCamelCase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 316
| 1
|
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
lowerCamelCase__ : int = 0
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Optional[Any] = {}
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if vertex not in self.adjacency:
lowerCamelCase__ : Optional[Any] = {}
self.num_vertices += 1
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
self.add_vertex(lowerCamelCase_ )
self.add_vertex(lowerCamelCase_ )
if head == tail:
return
lowerCamelCase__ : List[Any] = weight
lowerCamelCase__ : Optional[int] = weight
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.get_edges()
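# each undirected edge is stored twice (u -> v and v -> u); dropping the mirror
# entry while iterating leaves one record per edge before adjusting weights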
for edge in edges:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = edge
edges.remove((tail, head, weight) )
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ : Optional[Any] = list(edges[i] )
edges.sort(key=lambda lowerCamelCase_ : lowerCamelCase_[2] )
for i in range(len(lowerCamelCase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCamelCase__ : Optional[int] = edges[i][2] + 1
for edge in edges:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = edge
lowerCamelCase__ : Any = weight
lowerCamelCase__ : Any = weight
def __str__(self ):
'''simple docstring'''
lowerCamelCase__ : str = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCamelCase__ : List[Any] = self.adjacency[head][tail]
string += f'''{head} -> {tail} == {weight}\n'''
return string.rstrip('\n' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ (self ):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def a__ (lowerCamelCase_=None, lowerCamelCase_=None ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = Graph()
if vertices is None:
lowerCamelCase__ : Any = []
if edges is None:
lowerCamelCase__ : Any = []
for vertex in vertices:
g.add_vertex(lowerCamelCase_ )
for edge in edges:
g.add_edge(*lowerCamelCase_ )
return g
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
lowerCamelCase__ : int = {}
lowerCamelCase__ : List[Any] = {}
def __len__(self ):
'''simple docstring'''
return len(self.parent )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if item in self.parent:
return self.find(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = item
lowerCamelCase__ : Union[str, Any] = 0
return item
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if item not in self.parent:
return self.make_set(lowerCamelCase_ )
if item != self.parent[item]:
lowerCamelCase__ : int = self.find(self.parent[item] )
return self.parent[item]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.find(lowerCamelCase_ )
lowerCamelCase__ : Dict = self.find(lowerCamelCase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowerCamelCase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowerCamelCase__ : str = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowerCamelCase__ : Union[str, Any] = roota
return roota
return None
@staticmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
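# Boruvka's MST algorithm: in each round, find the cheapest edge leaving every
# component, add those edges to the MST, and union the components they connect;
# the number of components at least halves per round until one remains.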
lowerCamelCase__ : int = graph.num_vertices
lowerCamelCase__ : Optional[Any] = Graph.UnionFind()
lowerCamelCase__ : Optional[Any] = []
while num_components > 1:
lowerCamelCase__ : Optional[int] = {}
for vertex in graph.get_vertices():
lowerCamelCase__ : Union[str, Any] = -1
lowerCamelCase__ : Tuple = graph.get_edges()
for edge in edges:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = edge
lowerCamelCase__ : List[str] = union_find.find(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = union_find.find(lowerCamelCase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase__ : Optional[int] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase__ : Union[str, Any] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = cheap_edge[vertex]
if union_find.find(lowerCamelCase_ ) != union_find.find(lowerCamelCase_ ):
union_find.union(lowerCamelCase_, lowerCamelCase_ )
mst_edges.append(cheap_edge[vertex] )
lowerCamelCase__ : Union[str, Any] = num_components - 1
lowerCamelCase__ : Union[str, Any] = Graph.build(edges=lowerCamelCase_ )
return mst
| 316
|
"""simple docstring"""
import cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if k in (0.04, 0.06):
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
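# detect() computes image gradients, sums them over a sliding window to form the
# structure tensor M, and scores each pixel with the Harris response
# R = det(M) - k * trace(M)^2; pixels whose response exceeds the threshold are
# marked red as corners.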
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
lowerCamelCase__ : Tuple = self.k  # use the k supplied to the constructor instead of a hardcoded 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
# corner response threshold; can be tuned for more or fewer detections
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 316
| 1
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A_ : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ : List[Any] = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
A_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A_ : str = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def lowerCamelCase_ ( _lowerCamelCase ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCamelCase__ : List[Any] = dict((re.sub(r'@@$' , '' , _lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _lowerCamelCase ), v) for k, v in d.items() )
lowerCamelCase__ : int = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
lowerCamelCase__ : List[str] = d[k] # restore
return da
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 316
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
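# Open-addressing hash map with linear probing; _deleted is a tombstone that
# keeps probe chains intact after __delitem__, and the table resizes up when the
# load factor exceeds _capacity_factor and down when it falls below half of it.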
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[Any] = capacity_factor
lowerCamelCase__ : Optional[int] = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Tuple = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Optional[int] = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self._buckets
lowerCamelCase__ : Dict = [None] * new_size
lowerCamelCase__ : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Tuple = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
| 316
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'markuplm'
def __init__(self, lowerCamelCase_=3_0_5_2_2, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=0, lowerCamelCase_=0, lowerCamelCase_=2, lowerCamelCase_=2_5_6, lowerCamelCase_=1_0_2_4, lowerCamelCase_=2_1_6, lowerCamelCase_=1_0_0_1, lowerCamelCase_=3_2, lowerCamelCase_=5_0, lowerCamelCase_="absolute", lowerCamelCase_=True, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : int = layer_norm_eps
lowerCamelCase__ : Dict = position_embedding_type
lowerCamelCase__ : Optional[Any] = use_cache
lowerCamelCase__ : List[str] = classifier_dropout
# MarkupLM-specific properties for the XPath tag/subscript embeddings
lowerCamelCase__ : Dict = max_depth
lowerCamelCase__ : Any = max_xpath_tag_unit_embeddings
lowerCamelCase__ : Tuple = max_xpath_subs_unit_embeddings
lowerCamelCase__ : int = tag_pad_id
lowerCamelCase__ : Optional[Any] = subs_pad_id
lowerCamelCase__ : Union[str, Any] = xpath_unit_hidden_size
| 316
|
"""simple docstring"""
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = 1
while len(_lowerCamelCase ) < 1e6:
constant.append(str(_lowerCamelCase ) )
i += 1
lowerCamelCase__ : str = ''.join(_lowerCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 316
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Tuple = {"vocab_file": "spiece.model"}
A_ : Tuple = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
A_ : Dict = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
A_ : Optional[Any] = "▁"
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = VOCAB_FILES_NAMES
lowerCamelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, lowerCamelCase_, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_="[CLS]", lowerCamelCase_="[SEP]", lowerCamelCase_="<unk>", lowerCamelCase_="[SEP]", lowerCamelCase_="<pad>", lowerCamelCase_="[CLS]", lowerCamelCase_="[MASK]", lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = (
AddedToken(lowerCamelCase_, lstrip=lowerCamelCase_, rstrip=lowerCamelCase_, normalized=lowerCamelCase_ )
if isinstance(lowerCamelCase_, lowerCamelCase_ )
else mask_token
)
lowerCamelCase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_, remove_space=lowerCamelCase_, keep_accents=lowerCamelCase_, bos_token=lowerCamelCase_, eos_token=lowerCamelCase_, unk_token=lowerCamelCase_, sep_token=lowerCamelCase_, pad_token=lowerCamelCase_, cls_token=lowerCamelCase_, mask_token=lowerCamelCase_, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = do_lower_case
lowerCamelCase__ : Optional[int] = remove_space
lowerCamelCase__ : Any = keep_accents
lowerCamelCase__ : List[str] = vocab_file
lowerCamelCase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
return len(self.sp_model )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.__dict__.copy()
lowerCamelCase__ : Any = None
return state
def __setstate__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
lowerCamelCase__ : Dict = {}
lowerCamelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if self.remove_space:
lowerCamelCase__ : List[Any] = ' '.join(inputs.strip().split() )
else:
lowerCamelCase__ : int = inputs
lowerCamelCase__ : str = outputs.replace('``', '"' ).replace('\'\'', '"' )
if not self.keep_accents:
lowerCamelCase__ : Tuple = unicodedata.normalize('NFKD', lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = ''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_ )] )
if self.do_lower_case:
lowerCamelCase__ : Union[str, Any] = outputs.lower()
return outputs
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.preprocess_text(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.sp_model.encode(lowerCamelCase_, out_type=lowerCamelCase_ )
lowerCamelCase__ : Any = []
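# SentencePiece can glue a trailing comma onto a number (e.g. "9,"); re-encode
# the digits alone and re-attach the comma so numbers tokenize consistently.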
for piece in pieces:
if len(lowerCamelCase_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
lowerCamelCase__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_, '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase__ : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase__ : List[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase_ )
else:
new_pieces.append(lowerCamelCase_ )
return new_pieces
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.sp_model.PieceToId(lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : List[str] = ''
lowerCamelCase__ : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase_ ) + token
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[int] = []
else:
current_sub_tokens.append(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = False
out_string += self.sp_model.decode(lowerCamelCase_ )
return out_string.strip()
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : str = [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_, token_ids_a=lowerCamelCase_, already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Optional[int] = os.path.join(
lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file, 'wb' ) as fi:
                lowerCamelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
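# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, self-contained rendition of the preprocess + encode path above.
# It assumes a trained SentencePiece model saved as 'spiece.model'; the file
# name and the sample string are hypothetical.
import unicodedata

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load('spiece.model')

sample = "Hello,   ``quoted''  café"
# collapse whitespace runs and normalize LaTeX-style quotes (remove_space branch)
cleaned = ' '.join(sample.strip().split()).replace('``', '"').replace("''", '"')
# strip combining accents (the keep_accents=False branch)
cleaned = ''.join(c for c in unicodedata.normalize('NFKD', cleaned) if not unicodedata.combining(c))
print(sp.encode(cleaned, out_type=str))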
| 316
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : int = concatenate_datasets
A_ : Any = DownloadConfig
A_ : List[Any] = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : List[str] = DownloadConfig
A_ : Optional[int] = DownloadMode
A_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
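# --- Illustrative sketch (not part of the original __init__) ---
# The two import guards above rely on packaging's Version comparisons; a few
# standalone checks show how those comparisons behave:
from packaging import version

assert version.parse("3.6.9") < version.parse("3.7")  # would trip the Python guard
assert version.parse("8.0.0").major >= 8              # satisfies the pyarrow guard
assert version.parse("7.0.0").major < 8               # would raise the ImportWarning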
| 316
| 1
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig ):
                lowerCamelCase__ : Any = v.to_dict()
return d
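# --- Illustrative sketch (not part of the original file) ---
# Why to_dict() recurses above: a field that holds a config object must be
# flattened to a plain dict before the arguments can be JSON-serialized. The
# class names below are hypothetical stand-ins for GenerationConfig.
from dataclasses import dataclass, field


@dataclass
class InnerConfig:
    num_beams: int = 4

    def to_dict(self):
        return dict(self.__dict__)


@dataclass
class OuterArgs:
    generation_config: InnerConfig = field(default_factory=InnerConfig)

    def to_dict(self):
        d = dict(self.__dict__)
        for k, v in d.items():
            if hasattr(v, "to_dict"):
                d[k] = v.to_dict()
        return d


assert OuterArgs().to_dict() == {"generation_config": {"num_beams": 4}}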
| 316
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig ):
                lowerCamelCase__ : Any = v.to_dict()
return d
| 316
| 1
|
"""simple docstring"""
from math import isqrt, loga
def lowerCamelCase_ ( max_number ):
    lowerCamelCase__ : List[Any] = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                lowerCamelCase__ : Any = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def lowerCamelCase_ ( base = 80_0800 , degree = 80_0800 ):
    lowerCamelCase__ : Optional[Any] = degree * loga(base )
    lowerCamelCase__ : Optional[int] = int(upper_bound )
    lowerCamelCase__ : List[Any] = calculate_prime_numbers(max_prime )
    lowerCamelCase__ : List[Any] = 0
    lowerCamelCase__ : List[Any] = 0
    lowerCamelCase__ : Dict = len(prime_numbers ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
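# --- Illustrative cross-check (not part of the original solution) ---
# A "hybrid integer" is p**q * q**p for distinct primes p and q, and the
# two-pointer loop above tests the bound in log space:
#     p**q * q**p <= N  <=>  q*log10(p) + p*log10(q) <= log10(N)
# For a tiny limit the count can be verified with exact integer arithmetic:
from itertools import combinations


def brute_force_count(limit: int) -> int:
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23]  # enough for limit = 10**9
    return sum(1 for p, q in combinations(primes, 2) if p**q * q**p <= limit)


print(brute_force_count(10**9))  # 10 hybrid integers below 10**9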
| 316
|
"""simple docstring"""
def lowerCamelCase_ ( infix ):
    lowerCamelCase__ : Union[str, Any] = []
    lowerCamelCase__ : List[str] = []
    lowerCamelCase__ : Tuple = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    lowerCamelCase__ : List[str] = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8 ) , 'Stack'.center(print_width ) , 'Postfix'.center(print_width ) , sep=' | ' , )
    print('-' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ) , (''.join(stack )).ljust(print_width ) , (''.join(post_fix )).ljust(print_width ) , sep=' | ' , )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ' '.center(8 ) , (''.join(stack )).ljust(print_width ) , (''.join(post_fix )).ljust(print_width ) , sep=' | ' , )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def lowerCamelCase_ ( infix ):
    lowerCamelCase__ : Union[str, Any] = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            lowerCamelCase__ : List[Any] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            lowerCamelCase__ : Tuple = '('  # change ")" to "("
    return (infix_2_postfix(''.join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
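# --- Worked example (illustrative) ---
# For the infix expression (a+b)*c the prefix conversion above proceeds as:
#   1. reverse the string and swap parentheses:  c*(b+a)
#   2. run infix_2_postfix on the result:        cba+*
#   3. reverse that postfix string:              *+abc   <- prefix form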
| 316
| 1
|
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
A_ : Any = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
A_ : Tuple = get_tests_dir("fixtures/vocab.json")
A_ : Dict = get_tests_dir("fixtures")
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = 0
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : str = WavaVecaConfig()
lowerCamelCase__ : Dict = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = AutoProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowerCamelCase_, os.path.join(lowerCamelCase_, lowerCamelCase_ ) )
copyfile(lowerCamelCase_, os.path.join(lowerCamelCase_, 'vocab.json' ) )
lowerCamelCase__ : str = AutoProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : int = WavaVecaFeatureExtractor()
lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
lowerCamelCase__ : int = WavaVecaProcessor(lowerCamelCase_, lowerCamelCase_ )
# save in new folder
processor.save_pretrained(lowerCamelCase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowerCamelCase_, lowerCamelCase_ ), 'r' ) as f:
lowerCamelCase__ : Any = json.load(lowerCamelCase_ )
config_dict.pop('processor_class' )
with open(os.path.join(lowerCamelCase_, lowerCamelCase_ ), 'w' ) as f:
f.write(json.dumps(lowerCamelCase_ ) )
lowerCamelCase__ : Tuple = AutoProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : str = WavaVecaFeatureExtractor()
lowerCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
lowerCamelCase__ : List[Any] = WavaVecaProcessor(lowerCamelCase_, lowerCamelCase_ )
# save in new folder
processor.save_pretrained(lowerCamelCase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowerCamelCase_, lowerCamelCase_ ), 'r' ) as f:
lowerCamelCase__ : Dict = json.load(lowerCamelCase_ )
config_dict.pop('processor_class' )
with open(os.path.join(lowerCamelCase_, lowerCamelCase_ ), 'w' ) as f:
f.write(json.dumps(lowerCamelCase_ ) )
lowerCamelCase__ : Any = AutoProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Dict = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(lowerCamelCase_ )
# copy relevant files
copyfile(lowerCamelCase_, os.path.join(lowerCamelCase_, 'vocab.json' ) )
            # create empty sample processor
with open(os.path.join(lowerCamelCase_, lowerCamelCase_ ), 'w' ) as f:
f.write('{}' )
lowerCamelCase__ : Union[str, Any] = AutoProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
lowerCamelCase__ : Tuple = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
lowerCamelCase__ : Optional[Any] = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor', trust_remote_code=lowerCamelCase_ )
lowerCamelCase__ : int = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor', trust_remote_code=lowerCamelCase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__, 'NewProcessor' )
lowerCamelCase__ : str = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
lowerCamelCase__ : Tuple = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast' )
# Test we can also load the slow version
lowerCamelCase__ : Optional[int] = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor', trust_remote_code=lowerCamelCase_, use_fast=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__, 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer' )
def a__ (self ):
'''simple docstring'''
try:
AutoConfig.register('custom', lowerCamelCase_ )
AutoFeatureExtractor.register(lowerCamelCase_, lowerCamelCase_ )
AutoTokenizer.register(lowerCamelCase_, slow_tokenizer_class=lowerCamelCase_ )
AutoProcessor.register(lowerCamelCase_, lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoProcessor.register(lowerCamelCase_, lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase__ : int = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : int = os.path.join(lowerCamelCase_, 'vocab.txt' )
with open(lowerCamelCase_, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCamelCase__ : List[str] = CustomTokenizer(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = CustomProcessor(lowerCamelCase_, lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = AutoProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def a__ (self ):
'''simple docstring'''
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = False
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = False
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'AutoFeatureExtractor'
lowerCamelCase__ : Dict = 'AutoTokenizer'
lowerCamelCase__ : str = False
try:
AutoConfig.register('custom', lowerCamelCase_ )
AutoFeatureExtractor.register(lowerCamelCase_, lowerCamelCase_ )
AutoTokenizer.register(lowerCamelCase_, slow_tokenizer_class=lowerCamelCase_ )
AutoProcessor.register(lowerCamelCase_, lowerCamelCase_ )
# If remote code is not set, the default is to use local classes.
lowerCamelCase__ : Union[str, Any] = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__, 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowerCamelCase__ : int = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor', trust_remote_code=lowerCamelCase_ )
self.assertEqual(processor.__class__.__name__, 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowerCamelCase__ : List[str] = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor', trust_remote_code=lowerCamelCase_ )
self.assertEqual(processor.__class__.__name__, 'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__, 'BertTokenizerFast' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__, 'ConvNextImageProcessor' )
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-processor' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = WavaVecaProcessor.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCamelCase_, 'test-processor' ), push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase_, getattr(new_processor.feature_extractor, lowerCamelCase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab() )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = WavaVecaProcessor.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCamelCase_, 'test-processor-org' ), push_to_hub=lowerCamelCase_, use_auth_token=self._token, organization='valid_org', )
lowerCamelCase__ : List[Any] = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase_, getattr(new_processor.feature_extractor, lowerCamelCase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab() )
def a__ (self ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowerCamelCase__ : Tuple = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : Dict = os.path.join(lowerCamelCase_, 'vocab.txt' )
with open(lowerCamelCase_, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCamelCase__ : Dict = CustomTokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = CustomProcessor(lowerCamelCase_, lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''', token=self._token )
lowerCamelCase__ : Optional[int] = Repository(lowerCamelCase_, clone_from=f'''{USER}/test-dynamic-processor''', token=self._token )
processor.save_pretrained(lowerCamelCase_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map, {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
}, )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ) ) as f:
lowerCamelCase__ : Tuple = json.load(lowerCamelCase_ )
self.assertDictEqual(
tokenizer_config['auto_map'], {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
}, )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase_, 'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase_, 'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase_, 'custom_processing.py' ) ) )
repo.push_to_hub()
lowerCamelCase__ : Union[str, Any] = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__, 'CustomProcessor' )
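# --- Illustrative sketch (not part of the original tests) ---
# The registration pattern the tests above exercise, in isolation. The
# try/finally mirrors the tests' cleanup of the private _extra_content
# mappings; CustomConfig, CustomFeatureExtractor, CustomTokenizer and
# CustomProcessor are the fixture classes imported at the top of this file.
try:
    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
    AutoProcessor.register(CustomConfig, CustomProcessor)
    # ... AutoProcessor.from_pretrained(...) now resolves the custom classes ...
finally:
    CONFIG_MAPPING._extra_content.pop("custom", None)
    FEATURE_EXTRACTOR_MAPPING._extra_content.pop(CustomConfig, None)
    TOKENIZER_MAPPING._extra_content.pop(CustomConfig, None)
    PROCESSOR_MAPPING._extra_content.pop(CustomConfig, None)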
| 316
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that the mocked head request was actually called
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
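# --- Illustrative sketch (not part of the original tests) ---
# update_from_string, as exercised above, parses comma-separated key=value
# pairs and coerces each value to the type of the existing attribute. Note
# that GPTaConfig in this dump corresponds to transformers.GPT2Config.
from transformers import GPT2Config

c = GPT2Config()
c.update_from_string("n_embd=1024,resid_pdrop=0.2,scale_attn_weights=false,summary_type=foo")
assert c.n_embd == 1024
assert c.resid_pdrop == 0.2
assert c.scale_attn_weights is False
assert c.summary_type == "foo"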
| 316
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase_ ( ):
lowerCamelCase__ : Any = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
lowerCamelCase__ : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('RGB' )
return image
def lowerCamelCase_ ( config ):
lowerCamelCase__ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def lowerCamelCase_ ( dct , old , new ):
    lowerCamelCase__ : Tuple = dct.pop(old )
    lowerCamelCase__ : Tuple = val
def lowerCamelCase_ ( state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        lowerCamelCase__ : Optional[int] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        lowerCamelCase__ : str = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        lowerCamelCase__ : Tuple = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        lowerCamelCase__ : List[Any] = qkv_bias
def lowerCamelCase_ ( model_name ):
    lowerCamelCase__ : Union[str, Any] = 364 if 'coco' in model_name else 224
    lowerCamelCase__ : List[str] = InstructBlipVisionConfig(image_size=image_size ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowerCamelCase__ : Any = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowerCamelCase__ : Tuple = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowerCamelCase__ : Dict = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_2001 ).to_dict()
elif "vicuna-13b" in model_name:
lowerCamelCase__ : str = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_2001 ).to_dict()
else:
raise ValueError('Model name not supported' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowerCamelCase__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
    lowerCamelCase__ : Any = InstructBlipConfig(vision_config=vision_config , text_config=text_config , qformer_config=qformer_config )
return config, image_size
@torch.no_grad()
def lowerCamelCase_ ( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
lowerCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
lowerCamelCase__ : Optional[Any] = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowerCamelCase__ : Optional[int] = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
    lowerCamelCase__ , lowerCamelCase__ : Any = get_blipa_config(model_name )
    lowerCamelCase__ : Dict = InstructBlipForConditionalGeneration(config ).eval()
lowerCamelCase__ : int = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lowerCamelCase__ : str = 'cuda:1' if torch.cuda.is_available() else 'cpu'
lowerCamelCase__ : Dict = 'cuda:2' if torch.cuda.is_available() else 'cpu'
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=lavis_device )
original_model.eval()
print('Done!' )
# update state dict keys
lowerCamelCase__ : Union[str, Any] = original_model.state_dict()
lowerCamelCase__ : Dict = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCamelCase__ : List[Any] = state_dict.pop(_lowerCamelCase )
if key.startswith('Qformer.bert' ):
lowerCamelCase__ : List[str] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
lowerCamelCase__ : Optional[Any] = key.replace('self' , 'attention' )
if "llm_proj" in key:
lowerCamelCase__ : str = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
lowerCamelCase__ : Any = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
lowerCamelCase__ : int = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
lowerCamelCase__ : Tuple = key.replace('t5' , 'language' )
lowerCamelCase__ : Any = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
lowerCamelCase__ : List[str] = load_demo_image()
lowerCamelCase__ : int = 'What is unusual about this image?'
# create processor
lowerCamelCase__ : str = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
lowerCamelCase__ : int = InstructBlipProcessor(
image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase , )
lowerCamelCase__ : Tuple = processor(images=_lowerCamelCase , text=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# make sure processor creates exact same pixel values
lowerCamelCase__ : Dict = vis_processors['eval'](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
lowerCamelCase__ : str = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "vicuna" in model_name:
lowerCamelCase__ : List[str] = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
lowerCamelCase__ : Tuple = hf_model(**_lowerCamelCase ).logits
else:
lowerCamelCase__ : List[str] = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
lowerCamelCase__ : Dict = tokenizer('\n' , return_tensors='pt' ).input_ids.to(_lowerCamelCase )
lowerCamelCase__ : Tuple = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
lowerCamelCase__ : List[Any] = hf_model(**_lowerCamelCase , labels=_lowerCamelCase ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowerCamelCase__ : Union[str, Any] = 1e-4 if 'vicuna' in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , _lowerCamelCase , atol=_lowerCamelCase )
print('Looks ok!' )
print('Generating with original model...' )
lowerCamelCase__ : List[str] = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
lowerCamelCase__ : str = hf_model.generate(
**_lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowerCamelCase__ : List[Any] = 2
print('Original generation:' , _lowerCamelCase )
lowerCamelCase__ : List[Any] = processor.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = [text.strip() for text in output_text]
print('HF generation:' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''Salesforce/{model_name}''' )
hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
A_ : Optional[Any] = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
A_ : Optional[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
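# --- Illustrative sketch (not part of the original script) ---
# The rename_key pattern above in isolation: pop the tensor out of the state
# dict under its fairseq-style key and re-insert it under the HF-style key.
import torch

sd = {"visual_encoder.cls_token": torch.zeros(1, 1, 8)}
sd["vision_model.embeddings.class_embedding"] = sd.pop("visual_encoder.cls_token")
assert "visual_encoder.cls_token" not in sd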
| 316
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( nums , max_sum ):
    lowerCamelCase__ : list[list[int]] = []
    lowerCamelCase__ : list[int] = []
    lowerCamelCase__ : List[str] = 0
    lowerCamelCase__ : List[Any] = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , nums_sum )
    return result
def lowerCamelCase_ ( nums , max_sum , num_index , path , result , remaining_nums_sum , ):
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
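# --- Quick check (illustrative, not part of the original file) ---
# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the only subsets that sum
# to 9 are {3, 4, 2} and {4, 5}; the guard at the top of the recursion prunes
# any path that already exceeds the target or can no longer reach it with the
# numbers that remain.
assert sorted(sorted(s) for s in generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9)) == [[2, 3, 4], [4, 5]]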
| 316
| 1
|
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
A_ : int = namedtuple("covid_data", "cases deaths recovered")
def lowerCamelCase_ ( _lowerCamelCase = "https://www.worldometers.info/coronavirus/" ):
lowerCamelCase__ : Any = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(_lowerCamelCase ).content ).xpath(_lowerCamelCase ) )
A_ : Dict = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
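# --- Illustrative sketch (not part of the original script) ---
# The same XPath applied to a static snippet instead of the live page, which
# keeps the example deterministic (the markup mimics worldometers' counters):
static = html.fromstring(
    '<div class="maincounter-number"><span>1</span></div>'
    '<div class="maincounter-number"><span>2</span></div>'
    '<div class="maincounter-number"><span>3</span></div>'
)
assert static.xpath('//div[@class = "maincounter-number"]/span/text()') == ["1", "2", "3"]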
| 316
|
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
    lowerCamelCase__ : Optional[Any] = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        lowerCamelCase__ : List[Any] = q.get()
        lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
        lowerCamelCase__ : Dict = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        lowerCamelCase__ : str = TreeNode(int(check ) )
        lowerCamelCase__ : Dict = left_node
        q.put(left_node )
        lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
        lowerCamelCase__ : List[str] = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        lowerCamelCase__ : Optional[int] = TreeNode(int(check ) )
        lowerCamelCase__ : Any = right_node
        q.put(right_node )
raise
def lowerCamelCase_ ( node ):
    if not isinstance(node , TreeNode ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( node ):
    if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( node ):
    if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    lowerCamelCase__ : queue.Queue = queue.Queue()
    q.put(node )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    lowerCamelCase__ : queue.Queue = queue.Queue()
    q.put(node )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
            q.put(node )
def lowerCamelCase_ ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    lowerCamelCase__ : list[TreeNode] = []
    lowerCamelCase__ : int = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=',' )
            stack.append(n )
lowerCamelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    lowerCamelCase__ : list[TreeNode] = []
    lowerCamelCase__ : int = node
    while n or stack:
        while n:
            stack.append(n )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( node ):
    if not isinstance(node , TreeNode ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], []
lowerCamelCase__ : int = node
    stacka.append(node )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
        stacka.append(n )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A_ : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
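    # --- Non-interactive check (illustrative, not part of the original demo) ---
    # Build a three-node tree directly instead of via build_tree() and run the
    # recursive traversals; the expected output is shown in the comments.
    demo = TreeNode(1)
    demo.left, demo.right = TreeNode(2), TreeNode(3)
    pre_order(demo)   # 1,2,3,
    print()
    in_order(demo)    # 2,1,3,
    print()
    post_order(demo)  # 2,3,1,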
| 316
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A_ : int = logging.get_logger(__name__)
A_ : Tuple = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'longformer'
def __init__(self, lowerCamelCase_ = 5_1_2, lowerCamelCase_ = 2, lowerCamelCase_ = 1, lowerCamelCase_ = 0, lowerCamelCase_ = 2, lowerCamelCase_ = 3_0_5_2_2, lowerCamelCase_ = 7_6_8, lowerCamelCase_ = 1_2, lowerCamelCase_ = 1_2, lowerCamelCase_ = 3_0_7_2, lowerCamelCase_ = "gelu", lowerCamelCase_ = 0.1, lowerCamelCase_ = 0.1, lowerCamelCase_ = 5_1_2, lowerCamelCase_ = 2, lowerCamelCase_ = 0.02, lowerCamelCase_ = 1e-12, lowerCamelCase_ = False, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : str = attention_window
lowerCamelCase__ : List[Any] = sep_token_id
lowerCamelCase__ : Union[str, Any] = bos_token_id
lowerCamelCase__ : Union[str, Any] = eos_token_id
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : int = intermediate_size
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = max_position_embeddings
lowerCamelCase__ : Optional[int] = type_vocab_size
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : int = layer_norm_eps
lowerCamelCase__ : int = onnx_export
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = "default", lowerCamelCase_ = None ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = True
@property
def a__ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = super().outputs
if self.task == "default":
lowerCamelCase__ : Union[str, Any] = {0: 'batch'}
return outputs
@property
def a__ (self ):
'''simple docstring'''
return 1e-4
@property
def a__ (self ):
'''simple docstring'''
return max(super().default_onnx_opset, 1_4 )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = super().generate_dummy_inputs(
preprocessor=lowerCamelCase_, batch_size=lowerCamelCase_, seq_length=lowerCamelCase_, is_pair=lowerCamelCase_, framework=lowerCamelCase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCamelCase__ : List[Any] = torch.zeros_like(inputs['input_ids'] )
# make every second token global
lowerCamelCase__ : Tuple = 1
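# (in the unobfuscated source this line is presumably the sliced assignment
# inputs["global_attention_mask"][:, ::2] = 1, which marks every second
# position as a global-attention token)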
return inputs
| 316
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A_ : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher values score better but need more memory and run slower; can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ : List[Any] = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
A_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A_ : str = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def lowerCamelCase_ ( _lowerCamelCase ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCamelCase__ : List[Any] = dict((re.sub(r'@@$' , '' , _lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _lowerCamelCase ), v) for k, v in d.items() )
lowerCamelCase__ : int = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
lowerCamelCase__ : List[str] = d[k] # restore
return da
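# e.g. "<s>" first becomes "<s></w>" via the catch-all rule above; the loop
# then deletes that entry and restores the plain "<s>" key with its original id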
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
# note: the model dump is old; fairseq has since upgraded its model format
# and performs a whole series of rewrites and splits on the saved weights,
# so we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 316
| 1
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : int = len(_lowerCamelCase ) # No of vertices in graph
lowerCamelCase__ : Optional[int] = [0] * n
lowerCamelCase__ : str = [False] * n
def dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : List[str] = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , id_ )
lowerCamelCase__ : Any = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCamelCase__ : Optional[Any] = min(low[at] , low[to] )
lowerCamelCase__ : list[tuple[int, int]] = []
for i in range(_lowerCamelCase ):
if not visited[i]:
dfs(_lowerCamelCase , -1 , _lowerCamelCase , id_ )
return bridges
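# sanity check (assuming the obfuscated helpers above are the demo-graph
# getter and the bridge finder): the graph at index 1 is a forest, so every
# edge is a bridge -- the result is exactly
# {(0, 6), (1, 9), (2, 4), (2, 5), (3, 4), (6, 7)} (order follows the DFS)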
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
| 316
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A_ : Optional[int] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
A_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
|
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
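# worked example: the translation table maps A<->T and C<->G, so "GTAT"
# becomes "CATA"; a strand containing anything but ACGT (e.g. "GTAU")
# raises ValueError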
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = [[1, 2, 4], [1, 2, 3, 4]]
lowerCamelCase__ : Dict = DisjunctiveConstraint(lowerCamelCase_ )
self.assertTrue(isinstance(dc.token_ids, lowerCamelCase_ ) )
with self.assertRaises(lowerCamelCase_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCamelCase_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCamelCase_ ):
DisjunctiveConstraint(lowerCamelCase_ ) # fails here
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = [[1, 2, 3], [1, 2, 4]]
lowerCamelCase__ : Optional[int] = DisjunctiveConstraint(lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = dc.update(1 )
lowerCamelCase__ : int = stepped is True and completed is False and reset is False
self.assertTrue(lowerCamelCase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = dc.update(2 )
lowerCamelCase__ : Dict = stepped is True and completed is False and reset is False
self.assertTrue(lowerCamelCase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = dc.update(3 )
lowerCamelCase__ : Tuple = stepped is True and completed is True and reset is False
self.assertTrue(lowerCamelCase_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowerCamelCase__ : List[str] = DisjunctiveConstraint(lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 316
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = s.rsplit(_lowerCamelCase , _lowerCamelCase )
return new.join(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
# encoder.embeddings are copied twice in the original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Any = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ : Union[str, Any] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase__ : Dict = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
lowerCamelCase__ : str = rreplace(_lowerCamelCase , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
lowerCamelCase__ : Optional[Any] = rreplace(_lowerCamelCase , '.b' , '.bias' , 1 )
lowerCamelCase__ : int = value.float()
return upgrade
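# worked examples for the rename rules above (hypothetical keys, shown only
# to illustrate): "group_1.0.res_path.0.w" becomes
# "group_1.group.0.res_path.path.0.weight", and "group_2.1.b" becomes
# "group_2.group.1.bias"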
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True ):
from dall_e import Encoder
lowerCamelCase__ : List[str] = Encoder()
if os.path.exists(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = torch.load(_lowerCamelCase )
else:
lowerCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = ckpt.state_dict()
encoder.load_state_dict(_lowerCamelCase )
if config_path is not None:
lowerCamelCase__ : Union[str, Any] = FlavaImageCodebookConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : Dict = FlavaImageCodebookConfig()
lowerCamelCase__ : Tuple = FlavaImageCodebook(_lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = encoder.state_dict()
lowerCamelCase__ : Any = upgrade_state_dict(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = hf_model.state_dict()
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : str = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=False ):
lowerCamelCase__ : List[str] = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase__ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase__ : str = ''
else:
lowerCamelCase__ : Union[str, Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : Union[str, Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase__ : List[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase__ : str = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ : List[str] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Any = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = dct.pop(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = val
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : Dict = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
lowerCamelCase__ : Union[str, Any] = BitConfig(
global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=_lowerCamelCase , )
lowerCamelCase__ : str = ViTHybridConfig(backbone_config=_lowerCamelCase , image_size=384 , num_labels=1000 )
lowerCamelCase__ : str = False
# load original model from timm
lowerCamelCase__ : str = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase__ : Dict = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Tuple = 'huggingface/label-files'
lowerCamelCase__ : Any = 'imagenet-1k-id2label.json'
lowerCamelCase__ : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Optional[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Optional[Any] = idalabel
lowerCamelCase__ : Any = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase__ : List[Any] = ViTHybridModel(_lowerCamelCase ).eval()
else:
lowerCamelCase__ : Dict = ViTHybridForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# create image processor
lowerCamelCase__ : Dict = create_transform(**resolve_data_config({} , model=_lowerCamelCase ) )
lowerCamelCase__ : Optional[int] = transform.transforms
lowerCamelCase__ : Optional[Any] = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
lowerCamelCase__ : List[Any] = ViTHybridImageProcessor(
do_resize=_lowerCamelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCamelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=_lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Tuple = transform(_lowerCamelCase ).unsqueeze(0 )
lowerCamelCase__ : List[str] = processor(_lowerCamelCase , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
# verify logits
with torch.no_grad():
lowerCamelCase__ : Tuple = model(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = outputs.logits
print('Predicted class:' , logits.argmax(-1 ).item() )
if base_model:
lowerCamelCase__ : Optional[int] = timm_model.forward_features(_lowerCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCamelCase , outputs.pooler_output , atol=1e-3 )
else:
lowerCamelCase__ : int = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Optional[Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 316
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=2, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = scope
lowerCamelCase__ : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = num_patches + 2
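# e.g. with the defaults above (image_size=30, patch_size=2) this gives
# (30 // 2) ** 2 = 225 patches and a sequence length of 227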
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFDeiTModel(config=lowerCamelCase_ )
lowerCamelCase__ : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
lowerCamelCase__ : Any = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Optional[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Any = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFDeiTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, tf.keras.layers.Dense ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
lowerCamelCase__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCamelCase__ : List[Any] = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : Tuple = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
| 316
| 1
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
A_ : Any = 3
def lowerCamelCase_ ( _lowerCamelCase ):
print('Generating primitive root of p' )
while True:
lowerCamelCase__ : str = random.randrange(3 , _lowerCamelCase )
if pow(_lowerCamelCase , 2 , _lowerCamelCase ) == 1:
continue
if pow(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) == 1:
continue
return g
def lowerCamelCase_ ( _lowerCamelCase ):
print('Generating prime p...' )
lowerCamelCase__ : Tuple = rabin_miller.generate_large_prime(_lowerCamelCase ) # select large prime number.
lowerCamelCase__ : Union[str, Any] = primitive_root(_lowerCamelCase ) # one primitive root on modulo p.
lowerCamelCase__ : Dict = random.randrange(3 , _lowerCamelCase ) # private_key -> has to be greater than 2 for safety.
lowerCamelCase__ : Optional[Any] = cryptomath.find_mod_inverse(pow(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
lowerCamelCase__ : int = (key_size, e_a, e_a, p)
lowerCamelCase__ : List[Any] = (key_size, d)
return public_key, private_key
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
print('\nWARNING:' )
print(
f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
'Use a different name or delete these files and re-run this program.' )
sys.exit()
lowerCamelCase__ , lowerCamelCase__ : int = generate_key(_lowerCamelCase )
print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(f'''{name}_pubkey.txt''' , 'w' ) as fo:
fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(f'''Writing private key to file {name}_privkey.txt...''' )
with open(f'''{name}_privkey.txt''' , 'w' ) as fo:
fo.write(f'''{private_key[0]},{private_key[1]}''' )
def lowerCamelCase_ ( ):
print('Making key files...' )
make_key_files('elgamal' , 2048 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
| 316
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
while second != 0:
lowerCamelCase__ : Tuple = first & second
first ^= second
lowerCamelCase__ : int = c << 1
return first
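# trace of the carry loop above for add(5, 3) -- 0b101 + 0b011:
#   carry=0b001, first=0b110, second=0b010
#   carry=0b010, first=0b100, second=0b100
#   carry=0b100, first=0b000, second=0b1000
#   carry=0b000, first=0b1000, second=0      -> returns 8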
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = int(input("Enter the first number: ").strip())
A_ : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
| 316
| 1
|
"""simple docstring"""
A_ : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609344,
"knot": 1.852,
}
A_ : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277777778,
"mph": 0.621371192,
"knot": 0.539956803,
}
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if unit_from not in speed_chart or unit_to not in speed_chart_inverse:
lowerCamelCase__ : List[Any] = (
f'''Incorrect \'unit_from\' or \'unit_to\' value: {unit_from!r}, {unit_to!r}\n'''
f'''Valid values are: {', '.join(_lowerCamelCase )}'''
)
raise ValueError(_lowerCamelCase )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
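# worked example (assuming the obfuscated name above is convert_speed):
# convert_speed(100, "km/h", "m/s") computes 100 * 1.0 * 0.277777778
# = 27.7777778, which rounds to 27.778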
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 1
|
"""simple docstring"""
import functools
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# Validation
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not all(isinstance(_lowerCamelCase , _lowerCamelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(_lowerCamelCase ) != 3 or not all(isinstance(_lowerCamelCase , _lowerCamelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(_lowerCamelCase ) == 0:
return 0
if min(_lowerCamelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(_lowerCamelCase ) >= 366:
raise ValueError('All days elements should be less than 366' )
lowerCamelCase__ : Dict = set(_lowerCamelCase )
@functools.cache
def dynamic_programming(_lowerCamelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
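# worked example (the classic minimum-cost-tickets instance):
# days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15] -> 11, i.e. a 7-day pass
# bought on day 1 covers days 1-7, then one 1-day ticket each for day 8
# and day 20 (7 + 2 + 2)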
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
"""simple docstring"""
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
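# note: 2 / (1 + exp(-2x)) - 1 is algebraically identical to tanh(x);
# e.g. for x = 1 it evaluates to ~0.76159, matching np.tanh(1)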
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
import bisect
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ):
if hi < 0:
lowerCamelCase__ : str = len(_lowerCamelCase )
while lo < hi:
lowerCamelCase__ : List[Any] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lowerCamelCase__ : List[str] = mid + 1
else:
lowerCamelCase__ : Optional[int] = mid
return lo
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ):
if hi < 0:
lowerCamelCase__ : int = len(_lowerCamelCase )
while lo < hi:
lowerCamelCase__ : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lowerCamelCase__ : Optional[Any] = mid + 1
else:
lowerCamelCase__ : int = mid
return lo
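# the two variants above differ only at equal elements: for [0, 5, 5, 7]
# and item 5, bisect_left returns 1 (before the run of 5s) while
# bisect_right returns 3 (after it), so the insort helpers below place
# duplicates on opposite sides of the run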
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ):
sorted_collection.insert(bisect_left(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ):
sorted_collection.insert(bisect_right(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[int] = len(_lowerCamelCase ) - 1
while left <= right:
lowerCamelCase__ : Optional[int] = left + (right - left) // 2
lowerCamelCase__ : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
lowerCamelCase__ : Optional[Any] = midpoint - 1
else:
lowerCamelCase__ : str = midpoint + 1
return None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = bisect.bisect_left(_lowerCamelCase , _lowerCamelCase )
if index != len(_lowerCamelCase ) and sorted_collection[index] == item:
return index
return None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if right < left:
return None
lowerCamelCase__ : int = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_lowerCamelCase , _lowerCamelCase , midpoint + 1 , _lowerCamelCase )
if __name__ == "__main__":
A_ : Dict = input("Enter numbers separated by comma:\n").strip()
A_ : Dict = sorted(int(item) for item in user_input.split(","))
A_ : List[str] = int(input("Enter a single number to be found in the list:\n"))
A_ : List[str] = binary_search(collection, target)
if result is None:
print(f"{target} was not found in {collection}.")
else:
print(f"{target} was found at position {result} in {collection}.")
| 316
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
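# how the quine works: %r re-embeds the format string (quotes included) into
# itself, and %% collapses back to a literal %, so the printed text is the
# exact source line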
| 316
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=4, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : int = seq_length
lowerCamelCase__ : Any = is_training
lowerCamelCase__ : Dict = use_attention_mask
lowerCamelCase__ : List[str] = use_token_type_ids
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : Tuple = num_attention_heads
lowerCamelCase__ : List[Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : List[str] = attention_probs_dropout_prob
lowerCamelCase__ : str = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[str] = type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : List[str] = num_choices
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Any = None
if self.use_attention_mask:
lowerCamelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : List[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : str = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = config_and_inputs
lowerCamelCase__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : List[Any] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def a__ (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
            lowerCamelCase__ : Dict = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True )
lowerCamelCase__ : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
        lowerCamelCase__ : int = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True )
lowerCamelCase__ : int = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]], dtype=jnp.intaa )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Any = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ), lowerCamelCase_ )
# compare the actual values for a slice.
lowerCamelCase__ : str = np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]], dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3], lowerCamelCase_, atol=1e-4 ) )
@slow
def a__ (self ):
'''simple docstring'''
        lowerCamelCase__ : Union[str, Any] = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True )
lowerCamelCase__ : List[Any] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]], dtype=jnp.intaa )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )[0]
# compare the actual values for a slice.
lowerCamelCase__ : Union[str, Any] = np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]], dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3], lowerCamelCase_, atol=1e-4 ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ : int = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
A_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
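# A minimal sketch of the lazy-import idea used above, based on PEP 562
# module-level `__getattr__`. The mapping below is illustrative only; the real
# `_LazyModule` also handles `dir()`, submodules, and pickling.
import importlib

_LAZY_SKETCH_STRUCTURE = {"json": ["dumps", "loads"]}  # hypothetical mapping


def __getattr__(name):
    # resolve the attribute on first access, importing its module only then
    for module_name, symbols in _LAZY_SKETCH_STRUCTURE.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")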
"""simple docstring"""
A_ : int = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = set()
# keep track of all the paths to be checked
lowerCamelCase__ : Dict = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
lowerCamelCase__ : List[Any] = queue.pop(0 )
# get the last node from the path
lowerCamelCase__ : Optional[Any] = path[-1]
if node not in explored:
lowerCamelCase__ : Any = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
lowerCamelCase__ : str = list(_lowerCamelCase )
new_path.append(_lowerCamelCase )
queue.append(_lowerCamelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(_lowerCamelCase )
# in case there's no path between the 2 nodes
return []
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
lowerCamelCase__ : Optional[int] = [start]
lowerCamelCase__ : str = set(_lowerCamelCase )
# Keep tab on distances from `start` node.
lowerCamelCase__ : Dict = {start: 0, target: -1}
while queue:
lowerCamelCase__ : List[Any] = queue.pop(0 )
if node == target:
lowerCamelCase__ : int = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(_lowerCamelCase )
queue.append(_lowerCamelCase )
lowerCamelCase__ : int = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
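# `list.pop(0)` above costs O(n) per dequeue. A minimal sketch of the same
# traversal with `collections.deque` (O(1) popleft); it reuses the `demo_graph`
# adjacency dict defined above.
from collections import deque


def bfs_shortest_path_deque(graph, start, goal):
    explored = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node == goal:
            return path
        for neighbour in graph[node]:
            if neighbour not in explored:
                explored.add(neighbour)
                queue.append(path + [neighbour])
    return []


assert bfs_shortest_path_deque(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]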
"""simple docstring"""
import os
import posixpath
import shutil  # used below to move completed shard files out of the working directory
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
A_ : List[Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class a_ ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ : Optional[datasets.Features] = None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , ):
import pyspark
def generate_fn():
lowerCamelCase__ : Optional[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
lowerCamelCase__ : Dict = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
lowerCamelCase__ : Dict = partition_df.collect()
lowerCamelCase__ : int = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class a_ ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = df
lowerCamelCase__ : Any = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCamelCase__ : List[Any] = _generate_iterable_examples(self.df, self.partition_order )
def __iter__(self ):
'''simple docstring'''
yield from self.generate_examples_fn()
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.split_shard_indices_by_worker(lowerCamelCase_, lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
return len(self.partition_order )
class a_ ( datasets.DatasetBuilder ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = SparkConfig
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase__ : Optional[Any] = df
lowerCamelCase__ : Dict = working_dir
super().__init__(
cache_dir=lowerCamelCase_, config_name=str(self.df.semanticHash() ), **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
def create_cache_and_write_probe(lowerCamelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir, exist_ok=lowerCamelCase_ )
lowerCamelCase__ : str = os.path.join(self._cache_dir, 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowerCamelCase_, 'a' )
return [probe_file]
if self._spark.conf.get('spark.master', '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase__ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ), 1 ).mapPartitions(lowerCamelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def a__ (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(lowerCamelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
lowerCamelCase__ : List[Any] = self.df.count()
lowerCamelCase__ : List[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase__ : List[Any] = (
self.df.limit(lowerCamelCase_ )
.repartition(1 )
.mapInArrow(lowerCamelCase_, 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase__ : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase__ : str = min(lowerCamelCase_, int(approx_total_size / max_shard_size ) )
lowerCamelCase__ : List[Any] = self.df.repartition(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : List[str] = ParquetWriter if file_format == 'parquet' else ArrowWriter
lowerCamelCase__ : List[str] = os.path.join(self._working_dir, os.path.basename(lowerCamelCase_ ) ) if self._working_dir else fpath
lowerCamelCase__ : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase__ : int = self.config.features
lowerCamelCase__ : Dict = self._writer_batch_size
lowerCamelCase__ : Optional[Any] = self._fs.storage_options
def write_arrow(lowerCamelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase__ : Any = pyspark.TaskContext().taskAttemptId()
lowerCamelCase__ : str = next(lowerCamelCase_, lowerCamelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], )
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Any = writer_class(
features=lowerCamelCase_, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : List[str] = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCamelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
shard_id += 1
lowerCamelCase__ : Dict = writer_class(
features=writer._features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : Tuple = pa.Table.from_batches([batch] )
writer.write_table(lowerCamelCase_ )
if writer._num_bytes > 0:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ):
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(lowerCamelCase_ ), os.path.basename(lowerCamelCase_ ) )
shutil.move(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = (
self.df.mapInArrow(lowerCamelCase_, 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
self._validate_cache_dir()
lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
lowerCamelCase__ : str = not is_remote_filesystem(self._fs )
lowerCamelCase__ : Any = os.path.join if is_local else posixpath.join
lowerCamelCase__ : Any = '-TTTTT-SSSSS-of-NNNNN'
lowerCamelCase__ : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowerCamelCase__ : Union[str, Any] = path_join(self._output_dir, lowerCamelCase_ )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[str] = []
for task_id, content in self._prepare_split_single(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
lowerCamelCase__ : str = total_num_examples
lowerCamelCase__ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowerCamelCase__ : Union[str, Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase__ : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
rename(
lowerCamelCase_, fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''' ).replace('NNNNN', f'''{total_shards:05d}''' ), )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = 0
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ , lowerCamelCase__ : Any = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase_, len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Dict = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace(lowerCamelCase_, '' ), )
def a__ (self, lowerCamelCase_, ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
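# A pure-Python sketch of the shard-size arithmetic used by the builder above:
# estimate bytes per row from a small sample, then repartition so that every
# shard stays under `max_shard_size`. The function name and numbers are
# illustrative only.
def estimate_new_partition_count(num_rows, sample_bytes, sample_rows, max_shard_size, current_partitions):
    approx_bytes_per_row = sample_bytes / sample_rows
    approx_total_size = approx_bytes_per_row * num_rows
    if approx_total_size <= max_shard_size:
        return current_partitions  # small enough already; no repartition needed
    # at least one row per partition, and enough partitions to keep shards small
    return min(num_rows, int(approx_total_size / max_shard_size))


# e.g. 1M rows at ~2 KB each with 500 MiB shards -> 3 partitions
print(estimate_new_partition_count(1_000_000, 200_000, 100, 500 * 1024 * 1024, 8))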
"""simple docstring"""
A_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
A_ : list[bool | None] = [None] * 10_00_00_00
A_ : Tuple = True
A_ : Union[str, Any] = False
def lowerCamelCase_ ( _lowerCamelCase ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
    lowerCamelCase__ : str = chain(next_number(number ) )
    # memoise the result for this number and for its 10x, 100x, ... multiples,
    # whose digit-square chains are identical
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def lowerCamelCase_ ( _lowerCamelCase = 1000_0000 ):
for i in range(1 , _lowerCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = len(lowerCamelCase_ )
lowerCamelCase__ : Any = [0] * len_array
if len_array > 0:
lowerCamelCase__ : Union[str, Any] = array[0]
for i in range(1, lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = self.prefix_sum[i - 1] + array[i]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowerCamelCase_ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
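# A usage sketch of the prefix-sum class above, restated with readable names
# (`PrefixSum` stands in for the obfuscated class name).
class PrefixSum:
    def __init__(self, array):
        self.prefix_sum = [0] * len(array)
        if array:
            self.prefix_sum[0] = array[0]
        for i in range(1, len(array)):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        # inclusive range sum in O(1) after the O(n) prefix pass
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        # a subarray sums to `target_sum` iff two prefix sums differ by it
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


assert PrefixSum([1, 2, 3, 4]).get_sum(1, 3) == 9      # 2 + 3 + 4
assert PrefixSum([1, -2, 3]).contains_sum(1) is True   # subarray [1] or [-2, 3]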
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : str = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
def __init__(self, lowerCamelCase_ = True, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = PILImageResampling.BILINEAR, lowerCamelCase_ = True, lowerCamelCase_ = 1 / 2_5_5, lowerCamelCase_ = True, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = size if size is not None else {'shortest_edge': 3_8_4}
lowerCamelCase__ : List[str] = get_size_dict(lowerCamelCase_, default_to_square=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = do_resize
lowerCamelCase__ : str = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase__ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase__ : List[Any] = resample
lowerCamelCase__ : Optional[int] = do_rescale
lowerCamelCase__ : Union[str, Any] = rescale_factor
lowerCamelCase__ : List[str] = do_normalize
lowerCamelCase__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = PILImageResampling.BICUBIC, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = get_size_dict(lowerCamelCase_, default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase__ : Optional[int] = size['shortest_edge']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase__ : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase__ : Tuple = get_resize_output_image_size(lowerCamelCase_, size=lowerCamelCase_, default_to_square=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = resize(image=lowerCamelCase_, size=lowerCamelCase_, resample=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase_, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase_, **lowerCamelCase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase_, size=(shortest_edge, shortest_edge), resample=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
return rescale(lowerCamelCase_, scale=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
return normalize(lowerCamelCase_, mean=lowerCamelCase_, std=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = ChannelDimension.FIRST, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : int = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : Union[str, Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase__ : str = resample if resample is not None else self.resample
lowerCamelCase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase__ : str = size if size is not None else self.size
lowerCamelCase__ : Tuple = get_size_dict(lowerCamelCase_, default_to_square=lowerCamelCase_ )
lowerCamelCase__ : int = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase__ : Any = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
lowerCamelCase__ : Optional[int] = [self.resize(image=lowerCamelCase_, size=lowerCamelCase_, crop_pct=lowerCamelCase_, resample=lowerCamelCase_ ) for image in images]
if do_rescale:
lowerCamelCase__ : Optional[Any] = [self.rescale(image=lowerCamelCase_, scale=lowerCamelCase_ ) for image in images]
if do_normalize:
lowerCamelCase__ : Tuple = [self.normalize(image=lowerCamelCase_, mean=lowerCamelCase_, std=lowerCamelCase_ ) for image in images]
lowerCamelCase__ : Optional[Any] = [to_channel_dimension_format(lowerCamelCase_, lowerCamelCase_ ) for image in images]
lowerCamelCase__ : List[str] = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase_, tensor_type=lowerCamelCase_ )
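# A small numeric sketch of the `crop_pct` resize rule implemented above: below
# the 384 threshold the shortest edge is first scaled up by 1/crop_pct and then
# centre-cropped back down; at 384 or above the image is simply warped square.
def resize_plan(shortest_edge: int, crop_pct: float = 224 / 256) -> dict:
    if shortest_edge < 384:
        return {
            "resize_shortest_edge_to": int(shortest_edge / crop_pct),
            "center_crop_to": shortest_edge,
        }
    return {"warp_to": (shortest_edge, shortest_edge)}


assert resize_plan(224) == {"resize_shortest_edge_to": 256, "center_crop_to": 224}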
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['image_processor', 'tokenizer']
lowerCamelCase__ : Optional[int] = 'CLIPImageProcessor'
lowerCamelCase__ : List[str] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : int = kwargs.pop('feature_extractor' )
lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase__ : Any = self.tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if images is not None:
lowerCamelCase__ : List[Any] = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
        if text is not None and images is not None:
            # attach the image tensors to the text encoding before returning it
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ), tensor_type=lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer.model_input_names
lowerCamelCase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring"""
A_ : Optional[Any] = "Input must be a string of 8 numbers plus letter"
A_ : str = "TRWAGMYFPDXBNJZSQVHLCKE"
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = f'''Expected string as input, found {type(_lowerCamelCase ).__name__}'''
raise TypeError(_lowerCamelCase )
lowerCamelCase__ : Tuple = spanish_id.replace('-' , '' ).upper()
if len(_lowerCamelCase ) != 9:
raise ValueError(_lowerCamelCase )
try:
lowerCamelCase__ : int = int(spanish_id_clean[0:8] )
lowerCamelCase__ : List[Any] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_lowerCamelCase ) from ex
if letter.isdigit():
raise ValueError(_lowerCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
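# A compact restatement of the check above for quick verification;
# `validate_spanish_id` is a stand-in name for the obfuscated function, and this
# sketch raises on non-numeric ID bodies instead of returning False.
def validate_spanish_id(spanish_id: str) -> bool:
    cleaned = spanish_id.replace("-", "").upper()
    return len(cleaned) == 9 and cleaned[8] == LOOKUP_LETTERS[int(cleaned[:8]) % 23]


assert validate_spanish_id("12345678Z")       # 12345678 % 23 == 14 -> "Z"
assert not validate_spanish_id("12345678A")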
"""simple docstring"""
import cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if k in (0.04, 0.06):
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
        lowerCamelCase__ : Tuple = self.k  # use the configured k rather than a hard-coded 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = IFInpaintingSuperResolutionPipeline
lowerCamelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCamelCase__ : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCamelCase__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
def a__ (self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Tuple = floats_tensor((1, 3, 1_6, 1_6), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = floats_tensor((1, 3, 3_2, 3_2), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = floats_tensor((1, 3, 3_2, 3_2), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def a__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def a__ (self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def a__ (self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def a__ (self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def a__ (self ):
'''simple docstring'''
self._test_save_load_local()
def a__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
        super().__init__(None, None )  # the tombstone carries no key or value
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[Any] = capacity_factor
lowerCamelCase__ : Optional[int] = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Tuple = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Optional[int] = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0  # _add_item below re-counts the surviving entries
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Tuple = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
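# A short usage sketch of the open-addressing map above; `a_` is the name the
# renaming scheme left on the HashMap class directly above.
hash_map = a_()
hash_map["key_a"] = 1   # inserts probe linearly from hash(key) % capacity
hash_map["key_b"] = 2
del hash_map["key_a"]   # leaves a _deleted tombstone so probe chains stay intact
assert hash_map["key_b"] == 2 and len(hash_map) == 1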
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
A_ : Tuple = [8, 5, 9, 7]
A_ : Any = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
A_ : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : str = claim_vector
lowerCamelCase__ : List[Any] = allocated_resources_table
lowerCamelCase__ : Dict = maximum_claim_table
def a__ (self ):
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def a__ (self ):
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def a__ (self ):
'''simple docstring'''
return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def a__ (self ):
'''simple docstring'''
        return {self.__need().index(i ): i for i in self.__need()}
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.__need()
lowerCamelCase__ : List[Any] = self.__allocated_resources_table
lowerCamelCase__ : Optional[Any] = self.__available_resources()
lowerCamelCase__ : Optional[int] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 5_0 + '\n' )
while need_list:
lowerCamelCase__ : List[Any] = False
for each_need in need_list:
lowerCamelCase__ : Tuple = True
for index, need in enumerate(lowerCamelCase_ ):
if need > available_resources[index]:
lowerCamelCase__ : Optional[int] = False
break
if execution:
lowerCamelCase__ : Any = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
lowerCamelCase__ : Dict = original_need_index
print(f'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    lowerCamelCase__ : List[str] = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def a__ (self ):
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
                f'''P{self.__allocated_resources_table.index(item ) + 1}'''
+ ' '.join(f'''{it:>8}''' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
                f'''P{self.__maximum_claim_table.index(item ) + 1}'''
+ ' '.join(f'''{it:>8}''' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
            + ' '.join(str(x ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
            + ' '.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
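# A compact stand-alone sketch of the safety check driving the Banker's
# algorithm above: repeatedly run any process whose remaining need fits in the
# available vector, reclaim its allocation, and report whether all can finish.
def is_safe_state(claim, allocated, maximum):
    available = [c - sum(col) for c, col in zip(claim, zip(*allocated))]
    need = [[m - a for m, a in zip(mx, al)] for mx, al in zip(maximum, allocated)]
    pending = set(range(len(allocated)))
    while pending:
        runnable = [p for p in pending
                    if all(n <= v for n, v in zip(need[p], available))]
        if not runnable:
            return False  # unsafe: no pending process can finish
        for p in runnable:
            available = [v + a for v, a in zip(available, allocated[p])]
            pending.remove(p)
    return True


# the demo tables defined at the top of this module are in a safe state
print(is_safe_state([8, 5, 9, 7],
                    [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]],
                    [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]))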
"""simple docstring"""
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = 1
while len(_lowerCamelCase ) < 1e6:
constant.append(str(_lowerCamelCase ) )
i += 1
lowerCamelCase__ : str = ''.join(_lowerCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
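# The builder above materialises roughly a million characters; the n-th digit of
# Champernowne's constant can also be located arithmetically. A sketch:
def champernowne_digit(position: int) -> int:
    digits, first, count = 1, 1, 9
    while position > digits * count:  # skip whole blocks of equal-length numbers
        position -= digits * count
        digits, first, count = digits + 1, first * 10, count * 10
    number = first + (position - 1) // digits
    return int(str(number)[(position - 1) % digits])


assert champernowne_digit(12) == 1  # ...9 10 11: the 12th digit is the '1' of 11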
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
    return int(input_1 == input_2 == 0 )
def lowerCamelCase_ ( ):
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(f'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(f'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(f'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(f'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
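# A quick exhaustive property check for the gate above, assuming its upstream
# name `nor_gate` (the name already used by the truth-table printer).
from itertools import product

for input_1, input_2 in product((0, 1), repeat=2):
    assert nor_gate(input_1, input_2) == int(not (input_1 or input_2))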
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : int = concatenate_datasets
A_ : Any = DownloadConfig
A_ : List[Any] = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : List[str] = DownloadConfig
A_ : Optional[int] = DownloadMode
A_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
"""simple docstring"""
import string
from math import logaa
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
lowerCamelCase__ : str = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
lowerCamelCase__ : int = corpus_without_punctuation.split('\n' )
lowerCamelCase__ : int = term.lower()
return (len([doc for doc in docs if term in doc] ), len(_lowerCamelCase ))
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return round(tf * idf , 3 )
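# A worked numeric example mirroring the helpers above, computed inline since
# the rendered function names are obfuscated: tf counts "cat" in one document,
# df counts the documents containing it, and idf = round(log10(n / df), 3).
from math import log10

corpus_docs = ["the cat sat", "the dog sat", "the cat ran"]
tf = sum(word == "cat" for word in corpus_docs[0].split())  # term frequency -> 1
df = sum("cat" in doc for doc in corpus_docs)               # document frequency -> 2
idf = round(log10(len(corpus_docs) / df), 3)                # -> 0.176
assert (tf, df, idf, round(tf * idf, 3)) == (1, 2, 0.176, 0.176)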
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = v.to_dict()
return d
"""simple docstring"""
from functools import reduce
A_ : Dict = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowerCamelCase_ ( _lowerCamelCase = N ):
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _lowerCamelCase , _lowerCamelCase : str(int(_lowerCamelCase ) * int(_lowerCamelCase ) ) , n[i : i + 13] ) )
for i in range(len(_lowerCamelCase ) - 12 ) )
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
    lowerCamelCase__ : List[str] = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8 ) , 'Stack'.center(print_width ) , 'Postfix'.center(print_width ) , sep=' | ' , )
    print('-' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack (never pop past "(")
                while len(stack ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ) , (''.join(stack )).ljust(print_width ) , (''.join(post_fix )).ljust(print_width ) , sep=' | ' , )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
    print(
        ' '.center(8 ) , (''.join(stack )).ljust(print_width ) , (''.join(post_fix )).ljust(print_width ) , sep=' | ' , )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
    infix = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# ===== initialization =====
    conn = Mock()
    # the mocked socket's accept() returns (connection, address)
    sock.return_value.accept.return_value = conn, Mock()
    # reads yield one chunk of data and then EOF
    file_content_iterator = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(file_content_iterator )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=_lowerCamelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_update_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_config_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This checks that we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module where it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
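# For reference, a minimal sketch of what `update_from_string` (exercised above)
# does: it parses a comma-separated "key=value" string and casts each value to
# the type of the existing attribute. The values here are illustrative:
#
#   config = GPT2Config()
#   config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")
#   assert config.n_embd == 10
#   assert config.resid_pdrop == 0.2
#   assert config.scale_attn_weights is False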
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
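# A quick worked example of the zigzag scheme above (key=3, computed by hand):
# rail 0 collects positions 0, 4, 8   -> "HOR"
# rail 1 collects positions 1, 3, 5, 7, 9 -> "EL OL"
# rail 2 collects positions 2, 6, 10  -> "LWD"
# so encrypt("HELLO WORLD", 3) == "HOREL OLLWD" and
# decrypt("HOREL OLLWD", 3) == "HELLO WORLD"; bruteforce("HOREL OLLWD")
# returns every candidate decryption keyed by the guessed rail count.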
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # prune when the partial sum overshoots, or when even taking every
    # remaining number can no longer reach max_sum
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
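# With the inputs above, the backtracking explores the state-space tree and the
# two surviving subsets are printed as: [3, 4, 2] [4, 5]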
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
A_ : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 10_24,
"facebook/mbart-large-cc25": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang: str = "en_XX", tgt_texts=None, tgt_lang: str = "ro_RO", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
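# A hedged usage sketch for the tokenizer above (checkpoint name as published
# upstream; exact token ids depend on the downloaded vocabulary):
#
#   tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#   tokenizer.src_lang = "en_XX"
#   batch = tokenizer("Hello world", return_tensors="pt")
#   # input_ids end with </s> followed by the en_XX language code, matching the
#   # suffix_tokens installed by set_src_lang_special_tokens above.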
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
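# Hypothetical invocation (script name and paths are placeholders for a locally
# downloaded fairseq dump containing the .pt checkpoint, dicts and bpecodes):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path ./wmt19.ru-en.ensemble/model4.pt \
#       --pytorch_dump_folder_path ./fsmt-wmt19-ru-en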
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=[3_0, 3_0], lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=8, lowerCamelCase_=1_0, ):
'''simple docstring'''
lowerCamelCase__ : str = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : List[Any] = image_size
lowerCamelCase__ : Union[str, Any] = patch_size
lowerCamelCase__ : List[str] = num_channels
lowerCamelCase__ : Dict = is_training
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : List[str] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_act
lowerCamelCase__ : int = hidden_dropout_prob
lowerCamelCase__ : int = attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] = type_sequence_label_size
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : int = num_labels
lowerCamelCase__ : Any = scope
lowerCamelCase__ : int = n_targets
lowerCamelCase__ : List[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase__ : Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase__ : Any = num_patches + 1 + self.num_detection_tokens
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase__ : List[Any] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase__ : Dict = []
for i in range(self.batch_size ):
lowerCamelCase__ : Any = {}
lowerCamelCase__ : Tuple = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=lowerCamelCase_ )
lowerCamelCase__ : Dict = torch.rand(self.n_targets, 4, device=lowerCamelCase_ )
labels.append(lowerCamelCase_ )
lowerCamelCase__ : str = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = YolosModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = YolosForObjectDetection(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Union[str, Any] = model(pixel_values=lowerCamelCase_ )
lowerCamelCase__ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase__ : Optional[int] = model(pixel_values=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = config_and_inputs
lowerCamelCase__ : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCamelCase__ : Tuple = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Tuple = False
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Dict = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase__ : List[str] = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase__ : List[str] = {}
lowerCamelCase__ : int = torch.ones(
size=(self.model_tester.n_targets,), device=lowerCamelCase_, dtype=torch.long )
lowerCamelCase__ : Tuple = torch.ones(
self.model_tester.n_targets, 4, device=lowerCamelCase_, dtype=torch.float )
labels.append(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = labels
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = YolosModelTester(self )
lowerCamelCase__ : Any = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : str = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCamelCase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, nn.Linear ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Union[str, Any] = [*signature.parameters.keys()]
lowerCamelCase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Any = True
# in YOLOS, the seq_len is different
lowerCamelCase__ : List[str] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : Dict = True
lowerCamelCase__ : List[str] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : int = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ : Any = True
lowerCamelCase__ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : str = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
lowerCamelCase__ : Optional[int] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states, len(lowerCamelCase_ ) )
lowerCamelCase__ : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def a__ (self ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Any = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : List[str] = outputs.hidden_states
lowerCamelCase__ : int = getattr(
self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
# YOLOS has a different seq_length
lowerCamelCase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = YolosModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
@cached_property
def a__ (self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = self.default_image_processor
lowerCamelCase__ : Optional[int] = prepare_img()
lowerCamelCase__ : Any = image_processor(images=lowerCamelCase_, return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : str = model(inputs.pixel_values )
# verify outputs
lowerCamelCase__ : Optional[Any] = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]], device=lowerCamelCase_, )
lowerCamelCase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]], device=lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCamelCase_, atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], lowerCamelCase_, atol=1e-4 ) )
# verify postprocessing
lowerCamelCase__ : Union[str, Any] = image_processor.post_process_object_detection(
lowerCamelCase_, threshold=0.3, target_sizes=[image.size[::-1]] )[0]
lowerCamelCase__ : List[Any] = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(lowerCamelCase_ )
lowerCamelCase__ : int = [7_5, 7_5, 1_7, 6_3, 1_7]
lowerCamelCase__ : Any = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(lowerCamelCase_ )
self.assertEqual(len(results['scores'] ), 5 )
self.assertTrue(torch.allclose(results['scores'], lowerCamelCase_, atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist(), lowerCamelCase_ )
self.assertTrue(torch.allclose(results['boxes'][0, :], lowerCamelCase_ ) )
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
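# A hedged sketch of a concrete subclass wiring itself into an argparse-based
# CLI (the command name and behavior here are purely illustrative):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello from the CLI")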
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
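# A quick check of the padding arithmetic above (values computed by hand):
# for a 17x20 image with pad_size=8, pad_height = (17 // 8 + 1) * 8 - 17 = 7 and
# pad_width = (20 // 8 + 1) * 8 - 20 = 4, giving a 24x24 output. Note that a
# dimension already divisible by `size` still gains one full extra block
# (16 -> 24), a quirk of the `// size + 1` expression.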
"""simple docstring"""
import re
def dna(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
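# Worked examples for the complement above:
#   dna("GCTA") == "CGAT"
#   dna("ATGC") == "TACG"
#   dna("GCTB") raises ValueError("Invalid Strand")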
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
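# The sieve above fills phi[] in place: whenever phi[i] is still i - 1, i is
# prime, and every multiple j gets phi[j] -= phi[j] // i (i.e. is scaled by
# 1 - 1/i). Hand check for limit = 8: phi[2..8] = 1, 2, 2, 4, 2, 6, 4, so
# solution(8) would return 21 -- the count of reduced proper fractions with
# denominator <= 8 (Project Euler 72).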
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    parts = s.rsplit(old, occurrence)
    return new.join(parts)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
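# Hypothetical invocation (script name, checkpoint path and output folder are
# placeholders; --checkpoint_path also accepts a URL handled by torch.hub):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path ./encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook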
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
lowerCamelCase__ : int = ''
lowerCamelCase__ : int = ''
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = 2_5_6
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : int = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ : List[Any] = copy.deepcopy(self.img )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = plt.hist(self.img.ravel(), 2_5_6, [0, 2_5_6], label='x' )
lowerCamelCase__ : str = np.sum(lowerCamelCase_ )
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ : List[str] = x[i] / self.k
self.sk += prk
lowerCamelCase__ : List[str] = (self.L - 1) * self.sk
self.rem = last % 1  # fractional part of `last`, used for round-half-up
lowerCamelCase__ : str = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCamelCase_ )
lowerCamelCase__ : str = int(np.ma.count(self.img ) / self.img[1].size )
lowerCamelCase__ : Any = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCamelCase__ : int = self.img[j][i]
if num != self.last_list[num]:
lowerCamelCase__ : Any = self.last_list[num]
cva.imwrite('output_data/output.jpg', self.img )
def a__ (self ):
'''simple docstring'''
plt.hist(self.img.ravel(), 2_5_6, [0, 2_5_6] )
def a__ (self ):
'''simple docstring'''
cva.imshow('Output-Image', self.img )
cva.imshow('Input-Image', self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
A_ : List[Any] = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
A_ : str = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
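The class above implements histogram equalization over a fixed number of gray levels; a compact NumPy sketch of the same mapping, assuming an integer-valued grayscale image (names here are mine, not the original's):

import numpy as np

def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = np.cumsum(hist) / img.size                         # running sum of p(r_k)
    mapping = np.rint((levels - 1) * cdf).astype(img.dtype)  # s_k = (L - 1) * CDF(r_k)
    return mapping[img]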
| 316
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=2, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = scope
lowerCamelCase__ : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = num_patches + 2
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFDeiTModel(config=lowerCamelCase_ )
lowerCamelCase__ : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
lowerCamelCase__ : Any = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Optional[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Any = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFDeiTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, tf.keras.layers.Dense ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
lowerCamelCase__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCamelCase__ : List[Any] = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : Tuple = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
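The integration test above follows the usual load / preprocess / forward / compare pattern; a hedged standalone sketch (the checkpoint id and expected logits come from the test itself, while the image path is a placeholder):

import numpy as np
from PIL import Image
from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher

name = 'facebook/deit-base-distilled-patch16-224'
model = TFDeiTForImageClassificationWithTeacher.from_pretrained(name)
processor = DeiTImageProcessor.from_pretrained(name)
inputs = processor(images=Image.open('cat.png'), return_tensors='tf')
logits = model(**inputs).logits  # shape (1, 1000) for an ImageNet-1k head
np.testing.assert_allclose(logits[0, :3], [-1.0266, 0.1912, -1.2861], atol=1e-4)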
| 316
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
A_ : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , ):
output_path.parent.mkdir(parents=_lowerCamelCase , exist_ok=_lowerCamelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_lowerCamelCase , _lowerCamelCase , f=output_path.as_posix() , input_names=_lowerCamelCase , output_names=_lowerCamelCase , dynamic_axes=_lowerCamelCase , do_constant_folding=_lowerCamelCase , use_external_data_format=_lowerCamelCase , enable_onnx_checker=_lowerCamelCase , opset_version=_lowerCamelCase , )
else:
export(
_lowerCamelCase , _lowerCamelCase , f=output_path.as_posix() , input_names=_lowerCamelCase , output_names=_lowerCamelCase , dynamic_axes=_lowerCamelCase , do_constant_folding=_lowerCamelCase , opset_version=_lowerCamelCase , )
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
lowerCamelCase__ : str = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCamelCase__ : Union[str, Any] = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
lowerCamelCase__ : Tuple = 'cpu'
lowerCamelCase__ : Dict = Path(_lowerCamelCase )
# VAE DECODER
lowerCamelCase__ : List[str] = AutoencoderKL.from_pretrained(model_path + '/vae' )
lowerCamelCase__ : Any = vae_decoder.config.latent_channels
# forward only through the decoder part
lowerCamelCase__ : List[str] = vae_decoder.decode
onnx_export(
_lowerCamelCase , model_args=(
torch.randn(1 , _lowerCamelCase , 25 , 25 ).to(device=_lowerCamelCase , dtype=_lowerCamelCase ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=_lowerCamelCase , )
del vae_decoder
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
A_ : List[Any] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 316
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
while second != 0:
lowerCamelCase__ : Tuple = first & second  # carry bits
first ^= second
second = lowerCamelCase__ << 1  # shift the carry into place for the next iteration
return first
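A worked trace of the carry loop, to make the bit manipulation concrete:

# add(5, 9):
#   first=0b0101, second=0b1001 -> carry=0b0001, first=0b1100, second=0b0010
#   first=0b1100, second=0b0010 -> carry=0b0000, first=0b1110, second=0b0000
# second is now 0, so the loop exits and 0b1110 == 14 is returned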
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = int(input("Enter the first number: ").strip())
A_ : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
| 316
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
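`_LazyModule` defers the heavy framework imports above until an attribute is first touched; a toy sketch of the mechanism (simplified, not the real implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported class to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        # Only now is the defining submodule actually imported.
        module = importlib.import_module('.' + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)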
| 316
| 1
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 316
|
"""simple docstring"""
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase ):
return (2 / (1 + np.exp(-2 * _lowerCamelCase ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
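The expression above is the standard tanh identity, tanh(x) = 2 / (1 + e^(-2x)) - 1; a quick check against NumPy's builtin:

import numpy as np

v = np.linspace(-3, 3, 7)
assert np.allclose((2 / (1 + np.exp(-2 * v))) - 1, np.tanh(v))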
| 316
| 1
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
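`update_from_string` above round-trips int/float/bool/str attributes from a comma-separated spec; a hedged sketch of that parsing logic (not the library's exact implementation):

def update_from_string(cfg, spec: str) -> None:
    for pair in spec.split(','):
        key, value = pair.split('=')
        old = getattr(cfg, key)
        if isinstance(old, bool):  # check bool before int: bool is an int subclass
            value = value.lower() in ('true', '1', 'yes')
        elif isinstance(old, (int, float)):
            value = type(old)(value)
        setattr(cfg, key, value)

# e.g. update_from_string(c, 'n_embd=769,scale_attn_weights=False,summary_type=cls_indexfoo')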
| 316
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 316
| 1
|
"""simple docstring"""
class a_ : # Public class to implement a graph
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = row
lowerCamelCase__ : Optional[Any] = col
lowerCamelCase__ : Any = graph
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowerCamelCase__ : Optional[Any] = [-1, 0, 1, -1, 1, -1, 0, 1]
lowerCamelCase__ : str = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k], j + col_nbr[k], lowerCamelCase_ ):
self.diffs(i + row_nbr[k], j + col_nbr[k], lowerCamelCase_ )
def a__ (self ): # And finally, count all islands.
'''simple docstring'''
lowerCamelCase__ : int = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowerCamelCase__ : Any = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
count += 1
return count
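Because the method names above are mangled, here is a self-contained sketch of the same 8-connected flood-fill island count:

def count_islands(grid: list[list[int]]) -> int:
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def dfs(i: int, j: int) -> None:
        visited[i][j] = True
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                ni, nj = i + di, j + dj
                if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] and not visited[ni][nj]:
                    dfs(ni, nj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:
                dfs(i, j)
                count += 1
    return count

assert count_islands([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) == 1  # diagonal cells join into one island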
| 316
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ : int = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
A_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 1
|
"""simple docstring"""
def lowerCamelCase_ ( ):
constant = []
i = 1
while len(constant) < 1e6:
constant.append(str(i))
i += 1
constant = ''.join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 316
|
"""simple docstring"""
import os
import shutil
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
A_ : List[Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class a_ ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ : Optional[datasets.Features] = None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , ):
import pyspark
def generate_fn():
lowerCamelCase__ : Optional[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
lowerCamelCase__ : Dict = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
lowerCamelCase__ : Dict = partition_df.collect()
lowerCamelCase__ : int = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class a_ ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = df
lowerCamelCase__ : Any = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCamelCase__ : List[Any] = _generate_iterable_examples(self.df, self.partition_order )
def __iter__(self ):
'''simple docstring'''
yield from self.generate_examples_fn()
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.split_shard_indices_by_worker(lowerCamelCase_, lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
return len(self.partition_order )
class a_ ( datasets.DatasetBuilder ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = SparkConfig
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase__ : Optional[Any] = df
lowerCamelCase__ : Dict = working_dir
super().__init__(
cache_dir=lowerCamelCase_, config_name=str(self.df.semanticHash() ), **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
def create_cache_and_write_probe(lowerCamelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir, exist_ok=lowerCamelCase_ )
lowerCamelCase__ : str = os.path.join(self._cache_dir, 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowerCamelCase_, 'a' )
return [probe_file]
if self._spark.conf.get('spark.master', '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase__ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ), 1 ).mapPartitions(lowerCamelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def a__ (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(lowerCamelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
lowerCamelCase__ : List[Any] = self.df.count()
lowerCamelCase__ : List[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase__ : List[Any] = (
self.df.limit(lowerCamelCase_ )
.repartition(1 )
.mapInArrow(lowerCamelCase_, 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase__ : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase__ : str = min(lowerCamelCase_, int(approx_total_size / max_shard_size ) )
lowerCamelCase__ : List[Any] = self.df.repartition(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : List[str] = ParquetWriter if file_format == 'parquet' else ArrowWriter
lowerCamelCase__ : List[str] = os.path.join(self._working_dir, os.path.basename(lowerCamelCase_ ) ) if self._working_dir else fpath
lowerCamelCase__ : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase__ : int = self.config.features
lowerCamelCase__ : Dict = self._writer_batch_size
lowerCamelCase__ : Optional[Any] = self._fs.storage_options
def write_arrow(lowerCamelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase__ : Any = pyspark.TaskContext().taskAttemptId()
lowerCamelCase__ : str = next(lowerCamelCase_, lowerCamelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], )
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Any = writer_class(
features=lowerCamelCase_, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : List[str] = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCamelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
shard_id += 1
lowerCamelCase__ : Dict = writer_class(
features=writer._features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : Tuple = pa.Table.from_batches([batch] )
writer.write_table(lowerCamelCase_ )
if writer._num_bytes > 0:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ):
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(lowerCamelCase_ ), os.path.basename(lowerCamelCase_ ) )
shutil.move(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = (
self.df.mapInArrow(lowerCamelCase_, 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
self._validate_cache_dir()
lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
lowerCamelCase__ : str = not is_remote_filesystem(self._fs )
lowerCamelCase__ : Any = os.path.join if is_local else posixpath.join
lowerCamelCase__ : Any = '-TTTTT-SSSSS-of-NNNNN'
lowerCamelCase__ : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowerCamelCase__ : Union[str, Any] = path_join(self._output_dir, lowerCamelCase_ )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[str] = []
for task_id, content in self._prepare_split_single(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase_ )
lowerCamelCase__ : str = total_num_examples
lowerCamelCase__ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowerCamelCase__ : Union[str, Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase__ : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
rename(
lowerCamelCase_, fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''' ).replace('NNNNN', f'''{total_shards:05d}''' ), )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = 0
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ , lowerCamelCase__ : Any = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase_, len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Dict = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace(lowerCamelCase_, '' ), )
def a__ (self, lowerCamelCase_, ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
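The '-TTTTT-SSSSS-of-NNNNN' pattern above maps per-task shard ids onto a global numbering; a small sketch of the two substitutions (the values are illustrative):

fpath = 'dataset-train-TTTTT-SSSSS-of-NNNNN.arrow'
task_id, shard_id, global_shard_id, total_shards = 3, 7, 42, 100
src = fpath.replace('SSSSS', f'{shard_id:05d}').replace('TTTTT', f'{task_id:05d}')
dst = fpath.replace('TTTTT-SSSSS', f'{global_shard_id:05d}').replace('NNNNN', f'{total_shards:05d}')
print(src)  # dataset-train-00003-00007-of-NNNNN.arrow (NNNNN is only filled in the final name)
print(dst)  # dataset-train-00042-of-00100.arrow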
| 316
| 1
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
A_ : List[str] = logging.get_logger(__name__)
A_ : Tuple = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
A_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowerCamelCase_ ( _lowerCamelCase ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase__ : List[Any] = model_type_to_module_name(_lowerCamelCase )
lowerCamelCase__ : Dict = importlib.import_module(f'''.{module_name}''' , 'transformers.models' )
try:
return getattr(_lowerCamelCase , _lowerCamelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowerCamelCase , '__name__' , _lowerCamelCase ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase__ : Tuple = importlib.import_module('transformers' )
if hasattr(_lowerCamelCase , _lowerCamelCase ):
return getattr(_lowerCamelCase , _lowerCamelCase )
return None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ):
lowerCamelCase__ : int = get_file_from_repo(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(_lowerCamelCase , encoding='utf-8' ) as reader:
return json.load(_lowerCamelCase )
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(lowerCamelCase_ )
def a__ (cls, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = kwargs.pop('config', lowerCamelCase_ )
lowerCamelCase__ : List[Any] = kwargs.pop('trust_remote_code', lowerCamelCase_ )
lowerCamelCase__ : Any = True
lowerCamelCase__ , lowerCamelCase__ : str = FeatureExtractionMixin.get_feature_extractor_dict(lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : str = config_dict.get('feature_extractor_type', lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = None
if "AutoFeatureExtractor" in config_dict.get('auto_map', {} ):
lowerCamelCase__ : Tuple = config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_, **lowerCamelCase_ )
# It could be in `config.feature_extractor_type`
lowerCamelCase__ : List[Any] = getattr(lowerCamelCase_, 'feature_extractor_type', lowerCamelCase_ )
if hasattr(lowerCamelCase_, 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase__ : List[Any] = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
lowerCamelCase__ : str = feature_extractor_class_from_name(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = feature_extractor_auto_map is not None
lowerCamelCase__ : List[str] = feature_extractor_class is not None or type(lowerCamelCase_ ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase__ : Tuple = resolve_trust_remote_code(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
if has_remote_code and trust_remote_code:
lowerCamelCase__ : str = get_class_from_dynamic_module(
lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('code_revision', lowerCamelCase_ )
if os.path.isdir(lowerCamelCase_ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCamelCase_, **lowerCamelCase_ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCamelCase_, **lowerCamelCase_ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCamelCase_ ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase__ : int = FEATURE_EXTRACTOR_MAPPING[type(lowerCamelCase_ )]
return feature_extractor_class.from_dict(lowerCamelCase_, **lowerCamelCase_ )
raise ValueError(
f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(lowerCamelCase_, lowerCamelCase_ )
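The lookup above resolves a class name to its defining submodule via importlib; a minimal sketch of that resolution (the mapping contents here are illustrative):

import importlib

def class_from_name(class_name: str, mapping: dict[str, str]):
    for module_name, extractor in mapping.items():
        if extractor == class_name:
            module = importlib.import_module(f'.{module_name}', 'transformers.models')
            return getattr(module, class_name, None)
    return None

# e.g. class_from_name('CLIPFeatureExtractor', {'clip': 'CLIPFeatureExtractor'})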
| 316
|
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = len(lowerCamelCase_ )
lowerCamelCase__ : Any = [0] * len_array
if len_array > 0:
lowerCamelCase__ : Union[str, Any] = array[0]
for i in range(1, lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = self.prefix_sum[i - 1] + array[i]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowerCamelCase_ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
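A usage sketch of the prefix-sum structure above, with real names since the originals are mangled — O(n) build, O(1) range sums, and the hash-set subarray-sum check:

from itertools import accumulate

class PrefixSum:
    def __init__(self, array):
        self.prefix = list(accumulate(array))

    def range_sum(self, start, end):  # inclusive indices
        return self.prefix[end] - (self.prefix[start - 1] if start else 0)

    def contains_sum(self, target):
        # A subarray sums to `target` iff two prefix sums differ by exactly `target`.
        seen = {0}
        for s in self.prefix:
            if s - target in seen:
                return True
            seen.add(s)
        return False

ps = PrefixSum([1, 2, 3, 4])
assert ps.range_sum(1, 3) == 9  # 2 + 3 + 4
assert ps.contains_sum(7)       # 3 + 4
assert not ps.contains_sum(100)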
| 316
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['image_processor', 'tokenizer']
lowerCamelCase__ : Optional[int] = 'CLIPImageProcessor'
lowerCamelCase__ : List[str] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : int = kwargs.pop('feature_extractor' )
lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase__ : Any = self.tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if images is not None:
lowerCamelCase__ : List[Any] = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if text is not None and images is not None:
lowerCamelCase__ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ), tensor_type=lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer.model_input_names
lowerCamelCase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
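A usage sketch of the combined processor above (the entry-point class name and checkpoint are placeholders, since the original class name is mangled):

# processor = SomeClipStyleProcessor.from_pretrained('some/checkpoint')   # hypothetical
# batch = processor(text=['a photo of a cat'], images=image, return_tensors='pt')
# -> a BatchEncoding holding input_ids / attention_mask from the tokenizer
#    plus pixel_values merged in from the image processor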
| 316
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Tuple = num_channels
lowerCamelCase__ : Union[str, Any] = is_training
lowerCamelCase__ : Optional[int] = use_labels
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : List[str] = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] = type_sequence_label_size
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : Dict = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Any = (image_size // patch_size) ** 2
lowerCamelCase__ : int = num_patches + 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ViTMSNModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.type_sequence_label_size
lowerCamelCase__ : Dict = ViTMSNForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, labels=lowerCamelCase_ )
        print(f'''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
        print(f'''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : str = 1
lowerCamelCase__ : Tuple = ViTMSNForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : List[str] = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ViTMSNModelTester(self )
lowerCamelCase__ : Any = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, nn.Linear ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = model_class(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Any = ViTMSNModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def a__ (self ):
'''simple docstring'''
torch.manual_seed(2 )
lowerCamelCase__ : Any = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(lowerCamelCase_ )
lowerCamelCase__ : str = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=lowerCamelCase_, return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
| 316
|
"""simple docstring"""
import cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if k in (0.04, 0.06):
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
        lowerCamelCase__ : Tuple = self.k  # use the k chosen at construction rather than a hard-coded 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
                # corner-response threshold; 0.5 suits this demo and can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
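    # Hand-sized reference check (values made up): detect() computes the
    # standard Harris response r = det(M) - k * trace(M)**2 over the windowed
    # structure tensor M = [[wxx, wxy], [wxy, wyy]].
    wxx, wyy, wxy = 2.0, 3.0, 0.5  # windowed sums of dx*dx, dy*dy, dx*dy
    det = wxx * wyy - wxy**2  # 5.75
    trace = wxx + wyy  # 5.0
    print(det - 0.04 * trace**2)  # 4.75: positive -> corner, negative -> edge, ~0 -> flat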
| 316
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : str = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'convbert'
def __init__(self, lowerCamelCase_=3_0_5_2_2, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=2, lowerCamelCase_=7_6_8, lowerCamelCase_=2, lowerCamelCase_=9, lowerCamelCase_=1, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : List[str] = type_vocab_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : Any = embedding_size
lowerCamelCase__ : int = head_ratio
lowerCamelCase__ : List[Any] = conv_kernel_size
lowerCamelCase__ : Optional[Any] = num_groups
lowerCamelCase__ : List[Any] = classifier_dropout
class a_ ( snake_case_ ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 316
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
        super().__init__(None, None )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[Any] = capacity_factor
lowerCamelCase__ : Optional[int] = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Tuple = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Optional[int] = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self._buckets
lowerCamelCase__ : Dict = [None] * new_size
lowerCamelCase__ : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Tuple = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
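# Usage sketch (illustrative; assumes the mapping class above is exported as
# HashMap). It exercises insertion past the 8 * 0.75 = 6 item threshold that
# triggers the doubling resize, an overwrite, and a tombstone delete.
hm = HashMap(8, 0.75)
for demo_key in range(10):
    hm[demo_key] = demo_key * demo_key  # the seventh insert doubles the bucket array
hm[3] = -1  # overwrites via the stored.key == key branch
del hm[4]  # the slot becomes the _deleted tombstone; probing continues past it
print(len(hm), hm[3], 4 in hm)  # 9 -1 False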
| 316
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
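# Independent sanity check (not part of the solution): brute-force the same
# answer with itertools to confirm the backtracking output for the demo input.
from itertools import combinations

expected = [
    list(c)
    for r in range(1, len(nums) + 1)
    for c in combinations(nums, r)
    if sum(c) == max_sum
]
print(expected)  # [[4, 5], [3, 4, 2]] -- the same subsets, up to ordering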
| 316
|
"""simple docstring"""
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    lowerCamelCase__ : str = ''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
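    # Hand check of the first two factors: the string built in solution() is the
    # fractional part of the Champernowne constant, 0.123456789101112..., and the
    # product samples its 1st, 10th, ..., 1_000_000th digits.
    digits = "".join(str(i) for i in range(1, 12))  # "1234567891011"
    print(digits[0], digits[9])  # '1' and '1' (the 10th digit is the '1' of 10)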
| 316
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : List[Any] = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = 'luke'
def __init__(self, lowerCamelCase_=5_0_2_6_7, lowerCamelCase_=5_0_0_0_0_0, lowerCamelCase_=7_6_8, lowerCamelCase_=2_5_6, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=True, lowerCamelCase_=None, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=2, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : List[Any] = vocab_size
lowerCamelCase__ : Optional[int] = entity_vocab_size
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : List[Any] = entity_emb_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : Tuple = num_attention_heads
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : Dict = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : List[str] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = type_vocab_size
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : int = layer_norm_eps
lowerCamelCase__ : Optional[Any] = use_entity_aware_attention
lowerCamelCase__ : Union[str, Any] = classifier_dropout
| 316
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : int = concatenate_datasets
A_ : Any = DownloadConfig
A_ : List[Any] = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : List[str] = DownloadConfig
A_ : Optional[int] = DownloadMode
A_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 316
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(snake_case_ ) , 'Tatoeba directory does not exist.' )
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = tempfile.mkdtemp()
return TatoebaConverter(save_dir=lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
self.resolver.convert_models(['heb-eng'] )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.resolver.write_model_card('opus-mt-he-en', dry_run=lowerCamelCase_ )
assert mmeta["long_pair"] == "heb-eng"
| 316
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = v.to_dict()
return d
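# Illustrative restatement of the to_dict override above (using a hypothetical
# dataclass stand-in, not the real GenerationConfig): nested config objects are
# flattened to plain dicts so the resulting arguments stay JSON-serializable.
import json
from dataclasses import asdict, dataclass

@dataclass
class _FakeGenerationConfig:  # hypothetical stand-in for GenerationConfig
    max_length: int = 64

demo = {"predict_with_generate": True, "generation_config": _FakeGenerationConfig()}
demo = {k: (asdict(v) if hasattr(v, "__dataclass_fields__") else v) for k, v in demo.items()}
print(json.dumps(demo))  # {"predict_with_generate": true, "generation_config": {"max_length": 64}}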
| 316
| 1
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = ['image_processor', 'tokenizer']
lowerCamelCase__ : int = 'OwlViTImageProcessor'
lowerCamelCase__ : Union[str, Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : str = kwargs.pop('feature_extractor' )
lowerCamelCase__ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_="max_length", lowerCamelCase_="np", **lowerCamelCase_ ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(lowerCamelCase_, lowerCamelCase_ ) or (isinstance(lowerCamelCase_, lowerCamelCase_ ) and not isinstance(text[0], lowerCamelCase_ )):
lowerCamelCase__ : Optional[Any] = [self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )]
elif isinstance(lowerCamelCase_, lowerCamelCase_ ) and isinstance(text[0], lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = []
# Maximum number of queries across batch
lowerCamelCase__ : Any = max([len(lowerCamelCase_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowerCamelCase_ ) != max_num_queries:
lowerCamelCase__ : str = t + [' '] * (max_num_queries - len(lowerCamelCase_ ))
lowerCamelCase__ : str = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
encodings.append(lowerCamelCase_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
lowerCamelCase__ : List[str] = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0 )
lowerCamelCase__ : List[str] = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCamelCase__ : Union[str, Any] = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0 )
lowerCamelCase__ : int = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCamelCase__ : Any = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0 )
lowerCamelCase__ : List[Any] = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCamelCase__ : List[Any] = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0 )
lowerCamelCase__ : Any = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
lowerCamelCase__ : Union[str, Any] = BatchEncoding()
lowerCamelCase__ : Union[str, Any] = input_ids
lowerCamelCase__ : str = attention_mask
if query_images is not None:
lowerCamelCase__ : int = BatchEncoding()
lowerCamelCase__ : Any = self.image_processor(
lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ ).pixel_values
lowerCamelCase__ : int = query_pixel_values
if images is not None:
lowerCamelCase__ : Dict = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if text is not None and images is not None:
lowerCamelCase__ : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCamelCase__ : List[Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ), tensor_type=lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.image_processor.post_process(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', lowerCamelCase_, )
return self.image_processor_class
@property
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', lowerCamelCase_, )
return self.image_processor
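# Standalone illustration of one detail in __call__ above: nested text queries
# are right-padded with ' ' so every sample in the batch carries the same
# number of queries before tokenization.
demo_text = [["a photo of a cat"], ["a photo of a dog", "a remote"]]
demo_max_queries = max(len(t) for t in demo_text)
print([t + [" "] * (demo_max_queries - len(t)) for t in demo_text])
# -> the first sample gains one ' ' filler query; the second is unchanged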
| 316
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
lowerCamelCase__ : List[str] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(_lowerCamelCase ) , 'Postfix'.center(_lowerCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
print(
x.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
lowerCamelCase__ : List[Any] = ')' # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ : Tuple = '(' # change ")" to "("
return (infix_2_postfix(''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 316
| 1
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowerCamelCase__ : Any = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase_, variant=lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowerCamelCase__ : Optional[Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase_, variant=lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowerCamelCase__ : Optional[Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase_, variant=lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowerCamelCase__ : List[str] = 'fp16'
self.assertFalse(is_safetensors_compatible(lowerCamelCase_, variant=lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowerCamelCase__ : Optional[int] = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase_, variant=lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowerCamelCase__ : Any = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase_, variant=lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowerCamelCase__ : int = 'fp16'
self.assertFalse(is_safetensors_compatible(lowerCamelCase_, variant=lowerCamelCase_ ) )
| 316
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, there are arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that the fake head request was actually called
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 316
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
A_ : Optional[Any] = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.', lowerCamelCase_, )
super().__init__(*lowerCamelCase_, **lowerCamelCase_ )
| 316
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 316
| 1
|
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowerCamelCase_ ( _lowerCamelCase ):
return 1 / (1 + np.exp(-z ))
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return (-y * np.log(_lowerCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = np.dot(_lowerCamelCase , _lowerCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(_lowerCamelCase ) ) )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=7_0000 ):
lowerCamelCase__ : Dict = np.zeros(x.shape[1] )
for iterations in range(_lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = np.dot(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : List[Any] = sigmoid_function(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = np.dot(x.T , h - y ) / y.size
lowerCamelCase__ : Tuple = theta - alpha * gradient # updating the weights
lowerCamelCase__ : Tuple = np.dot(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Dict = sigmoid_function(_lowerCamelCase )
lowerCamelCase__ : Tuple = cost_function(_lowerCamelCase , _lowerCamelCase )
if iterations % 100 == 0:
print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
A_ : str = datasets.load_iris()
A_ : str = iris.data[:, :2]
A_ : Optional[Any] = (iris.target != 0) * 1
A_ : List[Any] = 0.1
A_ : Union[str, Any] = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print("theta: ", theta) # printing the theta i.e our weights vector
def lowerCamelCase_ ( _lowerCamelCase ):
return sigmoid_function(
np.dot(_lowerCamelCase , _lowerCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
((A_), (A_)) : List[str] = (x[:, 0].min(), x[:, 0].max())
((A_), (A_)) : List[Any] = (x[:, 1].min(), x[:, 1].max())
((A_), (A_)) : Tuple = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
A_ : Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
A_ : List[str] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
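# Numeric spot check (made-up data, independent of the iris demo above): the
# update in logistic_reg is plain batch gradient descent on the log-loss,
# gradient = x.T @ (h - y) / m.
x_chk = np.array([[1.0, 2.0], [2.0, 1.0]])
y_chk = np.array([1.0, 0.0])
theta_chk = np.zeros(2)
h_chk = sigmoid_function(np.dot(x_chk, theta_chk))  # sigmoid(0) = 0.5 for both rows
grad_chk = np.dot(x_chk.T, h_chk - y_chk) / y_chk.size  # [0.25, -0.25]
theta_chk -= 0.1 * grad_chk
print(theta_chk)  # [-0.025  0.025]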
| 316
|
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : Optional[Any] = TreeNode(int(_lowerCamelCase ) )
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
lowerCamelCase__ : Dict = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : str = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Dict = left_node
q.put(_lowerCamelCase )
lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
lowerCamelCase__ : List[str] = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : Optional[int] = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Any = right_node
q.put(_lowerCamelCase )
    raise  # unreachable guard: each iteration either returns on "n" or enqueues both children
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n:
stack.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], []
lowerCamelCase__ : int = node
stacka.append(_lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center
    )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
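# The functions above follow pytest's test_* naming convention, so (assuming
# the digital_image_processing package and the lena images are on disk) the
# whole suite can be run with, e.g.:
#   python -m pytest digital_image_processing/test_digital_image_processing.py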
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
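# Worked example (hypothetical vocab; all four special tokens must be present,
# otherwise the `del d2[f"{k}</w>"]` above raises KeyError):
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#   == {"le": 5, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}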
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
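    # Equivalent one-liner (a stylistic alternative, not in the original script):
    #   do_lower_case = all(k.islower() for k in src_vocab)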
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
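    # Example of the cleanup above (hypothetical line): a fastbpe "bpecodes"
    # entry such as "t h 12345" becomes just "t h" once the trailing
    # merge-frequency count is stripped by the regex.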
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"
    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
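    # e.g. a fairseq key like "encoder.embed_positions.weight" becomes
    # "model.encoder.embed_positions.weight", matching the `model` attribute
    # that wraps the encoder/decoder inside FSMTForConditionalGeneration.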
    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
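# Example invocation (paths are hypothetical):
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path data/wmt19.ru-en/model.pt \
#       --pytorch_dump_folder_path wmt19-ru-en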