code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from collections.abc import Sequence
from queue import Queue
class lowercase :
def __init__( self ,A__ ,A__ ,A__ ,A__=None ,A__=None):
lowercase = start
lowercase = end
lowercase = val
lowercase = (start + end) // 2
lowercase = left
lowercase = right
def __repr__( self):
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class lowercase :
def __init__( self ,A__ ,A__):
lowercase = collection
lowercase = function
if self.collection:
lowercase = self._build_tree(0 ,len(snake_case__) - 1)
def A__ ( self ,A__ ,A__):
self._update_tree(self.root ,snake_case__ ,snake_case__)
def A__ ( self ,A__ ,A__):
return self._query_range(self.root ,snake_case__ ,snake_case__)
def A__ ( self ,A__ ,A__):
if start == end:
return SegmentTreeNode(snake_case__ ,snake_case__ ,self.collection[start])
lowercase = (start + end) // 2
lowercase = self._build_tree(snake_case__ ,snake_case__)
lowercase = self._build_tree(mid + 1 ,snake_case__)
return SegmentTreeNode(snake_case__ ,snake_case__ ,self.fn(left.val ,right.val) ,snake_case__ ,snake_case__)
def A__ ( self ,A__ ,A__ ,A__):
if node.start == i and node.end == i:
lowercase = val
return
if i <= node.mid:
self._update_tree(node.left ,snake_case__ ,snake_case__)
else:
self._update_tree(node.right ,snake_case__ ,snake_case__)
lowercase = self.fn(node.left.val ,node.right.val)
def A__ ( self ,A__ ,A__ ,A__):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left ,snake_case__ ,snake_case__)
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left ,snake_case__ ,node.mid) ,self._query_range(node.right ,node.mid + 1 ,snake_case__) ,)
else:
# range in right child tree
return self._query_range(node.right ,snake_case__ ,snake_case__)
def A__ ( self):
if self.root is not None:
lowercase = Queue()
queue.put(self.root)
while not queue.empty():
lowercase = queue.get()
yield node
if node.left is not None:
queue.put(node.left)
if node.right is not None:
queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
lowercase__ :Tuple = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 706 |
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = defaultdict(lowerCAmelCase__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase = [key for key, value in counts.items() if value > 1]
lowercase = []
for duplicate_key in duplicates:
lowercase = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(lowerCAmelCase__ ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : s["title"].lower() )
def UpperCamelCase ( lowerCAmelCase__=False ):
'''simple docstring'''
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
lowercase = yaml.safe_load(f.read() )
# Get to the API doc
lowercase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase = content[api_idx]['''sections''']
# Then to the model doc
lowercase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowercase = api_doc[model_idx]['''sections''']
lowercase = [(idx, section) for idx, section in enumerate(lowerCAmelCase__ ) if '''sections''' in section]
lowercase = False
for idx, modality_doc in modalities_docs:
lowercase = modality_doc['''sections''']
lowercase = clean_model_doc_toc(lowerCAmelCase__ )
if old_modality_doc != new_modality_doc:
lowercase = True
if overwrite:
lowercase = new_modality_doc
if diff:
if overwrite:
lowercase = model_doc
lowercase = api_doc
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
lowercase__ :Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowercase__ :int = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 633 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 707 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 10**-10 ):
'''simple docstring'''
lowercase = a
while True:
lowercase = Decimal(lowerCAmelCase__ ) - (
Decimal(eval(lowerCAmelCase__ ) ) / Decimal(eval(str(diff(lowerCAmelCase__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(lowerCAmelCase__ ) ) < precision: # noqa: S307
return float(lowerCAmelCase__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 633 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : str =field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowercase_ : Optional[Any] =Features({'''image''': Image()} )
lowercase_ : Tuple =Features({'''labels''': ClassLabel} )
lowercase_ : Optional[int] ='''image'''
lowercase_ : Tuple ='''labels'''
def A__ ( self ,A__):
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.')
if not isinstance(features[self.label_column] ,A__):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
lowercase = copy.deepcopy(self)
lowercase = self.label_schema.copy()
lowercase = features[self.label_column]
lowercase = label_schema
return task_template
@property
def A__ ( self):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 708 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase , lowercase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return (gray > 127) & (gray <= 255)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = np.zeros_like(lowerCAmelCase__ )
lowercase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
lowercase = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
lowercase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
lowercase = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
lowercase__ :str = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
lowercase__ :List[str] = np.array(Image.open(lena_path))
# kernel to be applied
lowercase__ :Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
lowercase__ :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
lowercase__ :str = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
| 633 | 0 |
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
lowercase = len(_A )
lowercase = max(_A )
lowercase = min(_A )
# create the counting array
lowercase = coll_max + 1 - coll_min
lowercase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , _A ):
lowercase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
lowercase = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , _A ) ):
lowercase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return "".join([chr(_A ) for i in counting_sort([ord(_A ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
lowercase__ :int = input("Enter numbers separated by a comma:\n").strip()
lowercase__ :Optional[int] = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 709 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = emb.weight.shape
lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
lowercase = emb.weight.data
return lin_layer
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
lowercase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
lowercase = mam_aaa['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase = MaMaaaConfig(
vocab_size=lowerCAmelCase__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
lowercase = state_dict['''decoder.embed_tokens.weight''']
lowercase = MaMaaaForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowercase__ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowercase__ :Tuple = parser.parse_args()
lowercase__ :int = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowercase__ :Any = logging.get_logger(__name__)
@dataclass
class lowercase ( _A ):
lowercase_ : Any =[
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self ,**A__):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase = deprecated_arg[3:]
lowercase = not kwargs.pop(UpperCamelCase__)
logger.warning(
f'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}')
lowercase = kwargs.pop('''tpu_name''' ,self.tpu_name)
lowercase = kwargs.pop('''device_idx''' ,self.device_idx)
lowercase = kwargs.pop('''eager_mode''' ,self.eager_mode)
lowercase = kwargs.pop('''use_xla''' ,self.use_xla)
super().__init__(**UpperCamelCase__)
lowercase_ : Optional[int] =field(
default=_A , metadata={'''help''': '''Name of TPU'''} , )
lowercase_ : Optional[Any] =field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
lowercase_ : Tuple =field(default=_A , metadata={'''help''': '''Benchmark models in eager model.'''} )
lowercase_ : Optional[Any] =field(
default=_A , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def A__ ( self):
requires_backends(self ,['''tf'''])
lowercase = None
if self.tpu:
try:
if self.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowercase = None
return tpu
@cached_property
def A__ ( self):
requires_backends(self ,['''tf'''])
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu)
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
lowercase = tf.distribute.TPUStrategy(self._setup_tpu)
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'''GPU''')
lowercase = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}')
else:
tf.config.set_visible_devices([] ,'''GPU''') # disable GPU
lowercase = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}')
return strategy
@property
def A__ ( self):
requires_backends(self ,['''tf'''])
return self._setup_tpu is not None
@property
def A__ ( self):
requires_backends(self ,['''tf'''])
return self._setup_strategy
@property
def A__ ( self):
requires_backends(self ,['''tf'''])
return tf.config.list_physical_devices('''GPU''')
@property
def A__ ( self):
requires_backends(self ,['''tf'''])
if self.cuda:
return len(self.gpu_list)
return 0
@property
def A__ ( self):
return self.n_gpu > 0
| 710 |
from __future__ import annotations
from random import random
class lowercase :
def __init__( self ,A__ = None):
lowercase = value
lowercase = random()
lowercase = None
lowercase = None
def __repr__( self):
from pprint import pformat
if self.left is None and self.right is None:
return f'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1)
def __str__( self):
lowercase = str(self.value) + ''' '''
lowercase = str(self.left or '''''')
lowercase = str(self.right or '''''')
return value + left + right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowercase , lowercase = split(root.left , lowerCAmelCase__ )
return left, root
else:
lowercase , lowercase = split(root.right , lowerCAmelCase__ )
return root, right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowercase = merge(left.right , lowerCAmelCase__ )
return left
else:
lowercase = merge(lowerCAmelCase__ , right.left )
return right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = Node(lowerCAmelCase__ )
lowercase , lowercase = split(lowerCAmelCase__ , lowerCAmelCase__ )
return merge(merge(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = split(lowerCAmelCase__ , value - 1 )
lowercase , lowercase = split(lowerCAmelCase__ , lowerCAmelCase__ )
return merge(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
lowercase = insert(lowerCAmelCase__ , int(arg[1:] ) )
elif arg[0] == "-":
lowercase = erase(lowerCAmelCase__ , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = None
print(
'''enter numbers to create a tree, + value to add value into treap, '''
'''- value to erase all nodes with value. \'q\' to quit. ''' )
lowercase = input()
while args != "q":
lowercase = interact_treap(lowerCAmelCase__ , lowerCAmelCase__ )
print(lowerCAmelCase__ )
lowercase = input()
print('''good by!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Any = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :int = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
lowercase__ :Optional[int] = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
lowercase__ :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
def UpperCamelCase ( lowerCAmelCase__ = 1000 ):
'''simple docstring'''
lowercase = -1
lowercase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
lowercase = (n * n - 2 * a * n) // (2 * n - 2 * a)
lowercase = n - a - b
if c * c == (a * a + b * b):
lowercase = a * b * c
if candidate >= product:
lowercase = candidate
return product
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 712 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Tuple = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[str] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowercase__ :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ :Optional[int] = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :str = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Dict = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
lowercase__ :Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 713 |
from numpy import exp, pi, sqrt
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 1.0 ):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase ( __lowerCamelCase ):
lowercase_ : Dict ='char'
lowercase_ : Union[str, Any] ='bpe'
lowercase_ : str ='wp'
lowercase__ :Dict = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase ( __lowerCamelCase ):
lowercase_ : Optional[int] =['image_processor', 'char_tokenizer']
lowercase_ : int ='ViTImageProcessor'
lowercase_ : Any ='MgpstrTokenizer'
def __init__( self ,A__=None ,A__=None ,**A__):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,UpperCamelCase_ ,)
lowercase = kwargs.pop('''feature_extractor''')
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
lowercase = tokenizer
lowercase = AutoTokenizer.from_pretrained('''gpt2''')
lowercase = AutoTokenizer.from_pretrained('''bert-base-uncased''')
super().__init__(UpperCamelCase_ ,UpperCamelCase_)
def __call__( self ,A__=None ,A__=None ,A__=None ,**A__):
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''')
if images is not None:
lowercase = self.image_processor(UpperCamelCase_ ,return_tensors=UpperCamelCase_ ,**UpperCamelCase_)
if text is not None:
lowercase = self.char_tokenizer(UpperCamelCase_ ,return_tensors=UpperCamelCase_ ,**UpperCamelCase_)
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase = encodings['''input_ids''']
return inputs
def A__ ( self ,A__):
lowercase , lowercase , lowercase = sequences
lowercase = char_preds.size(0)
lowercase , lowercase = self._decode_helper(UpperCamelCase_ ,'''char''')
lowercase , lowercase = self._decode_helper(UpperCamelCase_ ,'''bpe''')
lowercase , lowercase = self._decode_helper(UpperCamelCase_ ,'''wp''')
lowercase = []
lowercase = []
for i in range(UpperCamelCase_):
lowercase = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowercase = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowercase = scores.index(max(UpperCamelCase_))
final_strs.append(strs[max_score_index])
final_scores.append(scores[max_score_index])
lowercase = {}
lowercase = final_strs
lowercase = final_scores
lowercase = char_strs
lowercase = bpe_strs
lowercase = wp_strs
return out
def A__ ( self ,A__ ,A__):
if format == DecodeType.CHARACTER:
lowercase = self.char_decode
lowercase = 1
lowercase = '''[s]'''
elif format == DecodeType.BPE:
lowercase = self.bpe_decode
lowercase = 2
lowercase = '''#'''
elif format == DecodeType.WORDPIECE:
lowercase = self.wp_decode
lowercase = 1_0_2
lowercase = '''[SEP]'''
else:
raise ValueError(f'Format {format} is not supported.')
lowercase , lowercase = [], []
lowercase = pred_logits.size(0)
lowercase = pred_logits.size(1)
lowercase , lowercase = pred_logits.topk(1 ,dim=-1 ,largest=UpperCamelCase_ ,sorted=UpperCamelCase_)
lowercase = preds_index.view(-1 ,UpperCamelCase_)[:, 1:]
lowercase = decoder(UpperCamelCase_)
lowercase , lowercase = torch.nn.functional.softmax(UpperCamelCase_ ,dim=2).max(dim=2)
lowercase = preds_max_prob[:, 1:]
for index in range(UpperCamelCase_):
lowercase = preds_str[index].find(UpperCamelCase_)
lowercase = preds_str[index][:pred_eos]
lowercase = preds_index[index].cpu().tolist()
lowercase = pred_index.index(UpperCamelCase_) if eos_token in pred_index else -1
lowercase = preds_max_prob[index][: pred_eos_index + 1]
lowercase = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCamelCase_)
conf_scores.append(UpperCamelCase_)
return dec_strs, conf_scores
def A__ ( self ,A__):
lowercase = [seq.replace(''' ''' ,'''''') for seq in self.char_tokenizer.batch_decode(UpperCamelCase_)]
return decode_strs
def A__ ( self ,A__):
return self.bpe_tokenizer.batch_decode(UpperCamelCase_)
def A__ ( self ,A__):
lowercase = [seq.replace(''' ''' ,'''''') for seq in self.wp_tokenizer.batch_decode(UpperCamelCase_)]
return decode_strs
| 714 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False ):
'''simple docstring'''
try:
lowercase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase = default
else:
# KEY is set, convert it to True or False.
try:
lowercase = strtobool(lowerCAmelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
lowercase__ :Dict = parse_flag_from_env("RUN_SLOW", default=False)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )
# NOTE(review): every helper below is emitted under the same obfuscated name
# `UpperCamelCase`, so at import time each definition shadows the previous one.
# These were presumably distinct `slow` / `require_*` skip decorators
# originally — verify against the upstream accelerate testing utilities.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless slow tests are enabled via the module-level `_run_slow_tests` flag.'''
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Run only when CUDA is NOT available (CPU-only environments).'''
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless a CUDA GPU is available.'''
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless an Intel XPU device is available.'''
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless torch was built with the Apple `mps` backend.'''
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless both `transformers` and `datasets` are installed.'''
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless bitsandbytes is installed.'''
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless a TPU runtime is available.'''
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless exactly one CUDA GPU is visible.'''
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless exactly one XPU device is visible.'''
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless more than one CUDA GPU is visible.'''
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless more than one XPU device is visible.'''
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless safetensors is installed.'''
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless DeepSpeed is installed.'''
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless the installed torch is at least 1.12.0.'''
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def UpperCamelCase ( test_case=None , version=None ):
    """Skip a test unless the installed torch is at least *version*.

    Usable two ways: with only ``version=`` (returns a partially-applied
    decorator, the ``test_case is None`` branch) or directly as a decorator.

    The obfuscated original declared the same parameter name twice (a
    SyntaxError) — the names are restored from the body's references.
    """
    if test_case is None:
        # Called as a decorator factory: bind the version and wait for the test.
        return partial(UpperCamelCase , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f'test requires torch version >= {version}' )(test_case )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless TensorBoard is installed.'''
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless wandb is installed.'''
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless comet_ml is installed.'''
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
# True when at least one non-comet tracker is installed and comet_ml is absent.
# NOTE(review): the decorator below reads `_atleast_one_tracker_available`,
# which is presumably this constant's original name — the obfuscation renamed
# the assignment target but not the use site; verify against upstream.
lowercase__ :Dict = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Skip unless some tracker is available and comet_ml is not installed.'''
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
class lowercase ( unittest.TestCase ):
    """Base TestCase giving every test class a shared scratch temp directory.

    NOTE(review): obfuscation dropped several assignment targets — `cls.tmpdir`
    and `self.clear_on_setup` are read below but the corresponding assignments
    became bare `lowercase = ...` statements; verify against upstream.
    """
    # Presumably `clear_on_setup` originally — checked in setUp below.
    lowercase_ : int =True
    @classmethod
    def A__ ( cls):
        # setUpClass: create the shared temp directory once per class.
        lowercase = tempfile.mkdtemp()
    @classmethod
    def A__ ( cls):
        # tearDownClass: remove the shared temp directory if it still exists.
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def A__ ( self):
        # setUp: optionally empty the temp directory between individual tests.
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('''**/*'''):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(A__)
class lowercase ( unittest.TestCase ):
    """TestCase that resets the Accelerate singletons after every test."""
    def A__ ( self):
        # tearDown: clear global accelerator state so tests stay independent.
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowercase ( unittest.TestCase ):
    """TestCase helper that starts one or more mocks and stops them on cleanup."""
    def A__ ( self):
        # Accepts a single mock or a tuple/list of mocks; each one is started
        # and registered for automatic teardown via addCleanup.
        # NOTE(review): the assignment target was presumably `self.mocks`.
        lowercase = mocks if isinstance(A__ ,(tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return True when *tensor* holds the same values on every distributed process.

    Gathers the tensor across all processes and compares each gathered copy to
    the local one. NOTE(review): obfuscation dropped the assignment targets —
    `state`, `tensors` and the local reference value are read below but never
    bound; verify against the upstream `are_the_same_tensors` helper.
    """
    lowercase = AcceleratorState()
    lowercase = tensor[None].clone().to(state.device )
    lowercase = gather(lowerCAmelCase__ ).cpu()
    lowercase = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , lowerCAmelCase__ ):
            return False
    return True
class lowercase :
    """Container for the outcome of a finished subprocess.

    The obfuscated original declared ``__init__(self, A__, A__, A__)`` — the
    same parameter name three times (a SyntaxError) — and its body referenced
    ``returncode``/``stdout``/``stderr`` while assigning to a bare local.
    Restored: distinct parameters stored as instance attributes.
    """

    def __init__( self , returncode , stdout , stderr ):
        # Exit status of the process.
        self.returncode = returncode
        # Captured stdout lines (decoded, rstripped by the streaming helper).
        self.stdout = stdout
        # Captured stderr lines.
        self.stderr = stderr
async def UpperCamelCase ( stream , callback ):
    """Forward every line read from *stream* to *callback* until EOF.

    Args:
        stream: an ``asyncio.StreamReader`` (e.g. a subprocess pipe).
        callback: called with each raw line (bytes, trailing newline included).

    The obfuscated original declared the same parameter name twice (a
    SyntaxError); the names are restored from the body's usage.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            # readline() returns b"" at EOF.
            break
async def UpperCamelCase ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    """Run *cmd* as a subprocess, streaming stdout/stderr line by line.

    Returns a ``_RunOutput`` holding the exit code plus the captured (decoded,
    rstripped) stdout and stderr lines. With ``echo=True`` the command is
    printed first; with ``quiet=False`` each line is also echoed live.

    The obfuscated original repeated one parameter name six times (a
    SyntaxError) and dropped the `tee`/lambda argument wiring; both are
    restored from the body's references.
    """
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        # Decode each raw line once, keep a copy, optionally echo it live.
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def UpperCamelCase ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    """Synchronously run *cmd* via the async streaming helper.

    Raises RuntimeError with the combined worker stderr when the process exits
    non-zero; otherwise returns the ``_RunOutput``.

    The obfuscated original repeated one parameter name six times (a
    SyntaxError); names and defaults are restored from the body and call sites.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}' )
    return result
# Raised by the subprocess helper below when a command exits non-zero; the
# base-class name was obfuscated — presumably `Exception` originally.
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    pass
def UpperCamelCase ( cmd , return_stdout=False ):
    """Run *cmd* with ``subprocess.check_output``, wrapping failures.

    Args:
        cmd: the command as a list of arguments.
        return_stdout: when True, return the captured stdout (decoded if bytes);
            otherwise return None.

    Raises:
        SubprocessCallException: if the command exits non-zero; the child's
        combined output is embedded in the message.

    The obfuscated original repeated one parameter name twice (a SyntaxError);
    names are restored from the body's references.
    """
    try:
        output = subprocess.check_output(cmd , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(cmd )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 633 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class lowercase ( pl.LightningModule ):
    """Minimal LightningModule: a Longformer encoder plus a 2-way QA output head.

    NOTE(review): obfuscation dropped the assignment targets — `self.model`,
    `self.num_labels` and `self.qa_outputs` are presumably what was assigned,
    since `self.model.config.hidden_size` and `self.num_labels` are read below
    and `qa_outputs` is accessed by the conversion function; verify upstream.
    """
    def __init__( self ,A__):
        super().__init__()
        lowercase = model
        lowercase = 2
        lowercase = nn.Linear(self.model.config.hidden_size ,self.num_labels)
    def A__ ( self):
        # Training is not needed for checkpoint conversion; intentionally a no-op.
        pass
def UpperCamelCase ( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
    """Convert a PyTorch-Lightning Longformer QA checkpoint into a
    `LongformerForQuestionAnswering` checkpoint saved under
    *pytorch_dump_folder_path*.

    The obfuscated original declared the same parameter name three times (a
    SyntaxError) and referenced an undefined `lowerCamelCase__`; parameter
    names are restored from the argparse flags defined below.
    """
    # load base longformer model from the model identifier
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('''cpu''' ) )
    lightning_model.load_state_dict(ckpt['''state_dict'''] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
    # CLI entry point: collect the three required identifiers/paths and run the
    # Lightning-checkpoint -> HF-checkpoint conversion defined above.
    # NOTE(review): the parser is assigned to the obfuscated `lowercase__` but
    # used below as `parser`/`args` — presumably those were the original names.
    lowercase__ :Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    lowercase__ :Optional[Any] = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 715 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
    # torch_xla is an optional dependency; import it only when a TPU runtime
    # is present so the module stays importable elsewhere.
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Seq2seq trainer that handles generation kwargs and runs an optional
    post-processing step before computing metrics.

    NOTE(review): obfuscation stripped most assignment targets (`eval_dataset`,
    `output`, `metrics`, `gen_kwargs`, ...) to bare `lowercase = ...`;
    the comments below describe the evident upstream intent — verify against
    the original `QuestionAnsweringSeq2SeqTrainer`.
    """
    def __init__( self ,*A__ ,A__=None ,A__=None ,**A__):
        super().__init__(*A__ ,**A__)
        # Presumably `self.eval_examples` and `self.post_process_function`.
        lowercase = eval_examples
        lowercase = post_process_function
    # evaluate(): run generation-based evaluation, then optional post-processing.
    def A__ ( self ,A__ = None ,A__=None ,A__ = None ,A__ = "eval" ,**A__ ,):
        lowercase = gen_kwargs.copy()
        # Fall back to the trainer args when max_length/num_beams are not given.
        lowercase = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
        )
        lowercase = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
        )
        lowercase = gen_kwargs
        lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
        lowercase = self.get_eval_dataloader(A__)
        lowercase = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        lowercase = self.compute_metrics
        lowercase = None
        lowercase = time.time()
        lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowercase = eval_loop(
                A__ ,description='''Evaluation''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
        finally:
            # Restore the metric function even if the loop raised.
            lowercase = compute_metrics
        lowercase = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            lowercase = self.post_process_function(A__ ,A__ ,A__)
            lowercase = self.compute_metrics(A__)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    lowercase = metrics.pop(A__)
            metrics.update(output.metrics)
        else:
            lowercase = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(A__)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        lowercase = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,A__)
        return metrics
    # predict(): same flow as evaluate() but over a test dataset, returning a
    # PredictionOutput instead of only metrics.
    def A__ ( self ,A__ ,A__ ,A__=None ,A__ = "test" ,**A__):
        lowercase = gen_kwargs.copy()
        lowercase = self.get_test_dataloader(A__)
        # Temporarily disable metric computation, we will do it in the loop here.
        lowercase = self.compute_metrics
        lowercase = None
        lowercase = time.time()
        lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowercase = eval_loop(
                A__ ,description='''Prediction''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
        finally:
            lowercase = compute_metrics
        lowercase = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        lowercase = self.post_process_function(A__ ,A__ ,A__ ,'''predict''')
        lowercase = self.compute_metrics(A__)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                lowercase = metrics.pop(A__)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=A__)
| 633 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Unconditional image-generation pipeline using the Karras et al. (2022)
    stochastic sampler (KarrasVeScheduler) with a UNet noise predictor.

    NOTE(review): obfuscation reduced the annotated fields to `42` and the
    `register_modules` arguments to `lowercase_` — presumably the original
    declared `unet: UNet2DModel` and `scheduler: KarrasVeScheduler`.
    """
    lowercase_ : List[str] =42
    lowercase_ : int =42
    def __init__( self ,A__ ,A__):
        super().__init__()
        self.register_modules(unet=lowercase_ ,scheduler=lowercase_)
    @torch.no_grad()
    def __call__( self ,A__ = 1 ,A__ = 5_0 ,A__ = None ,A__ = "pil" ,A__ = True ,**A__ ,):
        # Generate `batch_size` square RGB images of the UNet's sample size.
        lowercase = self.unet.config.sample_size
        lowercase = (batch_size, 3, img_size, img_size)
        lowercase = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        lowercase = randn_tensor(lowercase_ ,generator=lowercase_ ,device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(lowercase_)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            lowercase = self.scheduler.schedule[t]
            lowercase = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            lowercase , lowercase = self.scheduler.add_noise_to_input(lowercase_ ,lowercase_ ,generator=lowercase_)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            lowercase = (sigma_hat / 2) * model((sample_hat + 1) / 2 ,sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            lowercase = self.scheduler.step(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                lowercase = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 ,sigma_prev / 2).sample
                lowercase = self.scheduler.step_correct(
                    lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,step_output.prev_sample ,step_output['''derivative'''] ,)
            lowercase = step_output.prev_sample
        # Map samples from [-1, 1] to [0, 1] and move to channel-last numpy.
        lowercase = (sample / 2 + 0.5).clamp(0 ,1)
        lowercase = sample.cpu().permute(0 ,2 ,3 ,1).numpy()
        if output_type == "pil":
            lowercase = self.numpy_to_pil(lowercase_)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowercase_)
| 716 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
# Description shown by `accelerate tpu-config --help`; the parser below reads
# it as `_description` (the obfuscation had renamed only the assignment).
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( subparsers=None ):
    """Build the argument parser for the `accelerate tpu-config` command.

    When *subparsers* is given, the command is registered on it and its
    default `func` is bound to the launcher; otherwise a standalone parser is
    returned. Fixes from the obfuscated original: `--config_file` was given
    the *subparsers* object as `type=`/`default=`, and `set_defaults(func=...)`
    bound the subparsers object instead of `tpu_command_launcher`.
    """
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def UpperCamelCase ( lowerCAmelCase__ ):
    """Run the configured startup commands on every worker of a TPU pod via
    `gcloud compute tpus tpu-vm ssh`.

    NOTE(review): obfuscation dropped most assignment targets — `defaults`,
    `args.command_file`, `args.command`, `new_cmd` and `cmd` are read below
    but assigned to a bare `lowercase`; verify against upstream tpu.py.
    """
    lowercase = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(lowerCAmelCase__ ):
        lowercase = load_config_from_file(args.config_file )
    if not args.command_file and defaults.command_file is not None and not args.command:
        lowercase = defaults.command_file
    if not args.command and defaults.commands is not None:
        lowercase = defaults.commands
    if not args.tpu_name:
        lowercase = defaults.tpu_name
    if not args.tpu_zone:
        lowercase = defaults.tpu_zone
    # Resolve which accelerate distribution to install on the pod.
    if args.accelerate_version == "dev":
        lowercase = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        lowercase = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , lowerCAmelCase__ ):
        lowercase = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            lowercase = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , lowerCAmelCase__ ):
        lowercase = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    lowercase = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    # Join all startup commands into one shell string for the remote side.
    lowercase = '''; '''.join(lowerCAmelCase__ )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    lowercase = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        # Dry-run: print the assembled gcloud invocation instead of executing.
        print(f'Running {" ".join(lowerCAmelCase__ )}' )
        return
    subprocess.run(lowerCAmelCase__ )
    print('''Successfully setup pod.''' )
def UpperCamelCase ( ):
    """Standalone CLI entry point: build the parser, parse argv, launch."""
    lowercase = tpu_command_parser()
    lowercase = parser.parse_args()
    tpu_command_launcher(lowerCAmelCase__ )
| 633 | 0 |
def UpperCamelCase ( numbers ):
    """Sort *numbers* in place with exchange sort and return the same list.

    O(n^2) comparisons; fine for the small interactive inputs this script
    handles. The obfuscated original referenced an undefined `snake_case__`
    for the parameter/length — restored from the body's `numbers` references.
    """
    n = len(numbers )
    for i in range(n ):
        # After each outer pass, numbers[i] holds the i-th smallest element.
        for j in range(i + 1 , n ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    # Read comma-separated integers from stdin and print them sorted.
    # NOTE(review): the calls reference `exchange_sort`/`unsorted`/`user_input`,
    # but the obfuscated assignments above bind `lowercase__` — presumably the
    # original names; verify against upstream.
    lowercase__ :Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
    lowercase__ :int = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Regex splitting code into identifier-like tokens (presumably `NON_ALPHA`
# originally — the functions below read it under that name; verify upstream).
lowercase__ :Optional[Any] = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
# Presumably `MIN_NUM_TOKENS` (minimum tokens for hashing) and `NUM_PERM`
# (MinHash permutations) — read under those names below.
lowercase__ :List[str] = 10
lowercase__ :Tuple = 256
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return the MinHash of a token list, or None for very short files.

    NOTE(review): `MinHash(num_perm=lowerCAmelCase__)` passes the token list
    as `num_perm` — upstream uses the `NUM_PERM` constant here; verify.
    """
    if len(lowerCAmelCase__ ) < MIN_NUM_TOKENS:
        return None
    lowercase = MinHash(num_perm=lowerCAmelCase__ )
    for token in set(lowerCAmelCase__ ):
        min_hash.update(token.encode() )
    return min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    """Split a code string on non-identifier characters; return the set of
    non-empty tokens."""
    return {t for t in NON_ALPHA.split(lowerCAmelCase__ ) if len(t.strip() ) > 0}
class lowercase :
    """MinHash-LSH index that groups near-duplicate files into clusters.

    NOTE(review): obfuscation dropped assignment targets; the evident attribute
    names (read below) are `_duplication_jaccard_threshold`, `_num_perm`,
    `_index` and `_duplicate_clusters` — verify against upstream.
    """
    def __init__( self ,*,
        A__ = 0.85 ,):
        lowercase = duplication_jaccard_threshold
        lowercase = NUM_PERM
        lowercase = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm)
        lowercase = defaultdict(A__)
    def A__ ( self ,A__ ,A__):
        # add(code_key, min_hash): query the LSH for near duplicates before
        # inserting, then attach the key to an existing cluster if any base
        # duplicate is already clustered, else start a cluster at the first hit.
        lowercase = self._index.query(A__)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(A__ ,A__)
        if len(A__) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(A__)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(A__)
    def A__ ( self):
        # get_duplicate_clusters(): flatten each base + its duplicates into a
        # list of {"base_index", "repo_name", "path"} dicts.
        lowercase = []
        for base, duplicates in self._duplicate_clusters.items():
            lowercase = [base] + list(A__)
            # reformat the cluster to be a list of dict
            lowercase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(A__)
        return duplicate_clusters
    def A__ ( self ,A__):
        # Serialize all clusters to the given JSON file path.
        lowercase = self.get_duplicate_clusters()
        with open(A__ ,'''w''') as f:
            json.dump(A__ ,A__)
def UpperCamelCase ( element ):
    """MinHash one dataset row.

    Args:
        element: an ``(index, row)`` pair where row has "content",
            "repo_name" and "path" keys.

    Returns:
        ``((index, repo_name, path), min_hash)`` or None for too-short files.
        The obfuscated original unpacked the pair into the same name twice,
        losing the index — restored here.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    """Yield ((index, repo_name, path), min_hash) pairs for the dataset,
    computing hashes in a multiprocessing pool fed through a threaded queue."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(lowerCAmelCase__ , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def UpperCamelCase ( dataset_iterator , jaccard_threshold ):
    """Find clusters of near-duplicate files in *dataset_iterator*.

    The obfuscated original declared the same parameter name twice (a
    SyntaxError); names are restored from the call sites and index API.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def UpperCamelCase ( codea , codeb ):
    """Jaccard similarity between the token sets of two code strings.

    The obfuscated original declared one parameter name twice (a SyntaxError)
    and computed ``tokensa & tokensa`` — a self-intersection that always
    yields 1.0; fixed to compare the two distinct token sets.
    """
    tokensa = get_tokens(codea )
    tokensb = get_tokens(codeb )
    return len(tokensa & tokensb ) / len(tokensa | tokensb )
# Module-level slot shared with forked worker processes by `find_extremes`
# (presumably `_shared_dataset` originally — the workers read it under that
# name via the `global` statement below; verify upstream).
lowercase__ :List[Any] = None
def UpperCamelCase ( cluster , jaccard_threshold ):
    """Reduce one duplicate cluster to its 'extreme' representatives.

    An element joins *extremes* only if it is not within *jaccard_threshold*
    of any element already kept; otherwise the matching extreme's "copies"
    counter is incremented. Reads the worker-global ``_shared_dataset``.

    The obfuscated original declared the same parameter name twice (a
    SyntaxError); the second parameter must be named ``jaccard_threshold``
    because `find_extremes` binds it by keyword via `partial`.
    """
    extremes = []
    for elementa in cluster:
        codea = _shared_dataset[elementa['''base_index''']]['''content''']
        for elementb in extremes:
            codeb = _shared_dataset[elementb['''base_index''']]['''content''']
            if jaccard_similarity(codea , codeb ) >= jaccard_threshold:
                elementb["copies"] += 1
                break
        else:
            # No kept extreme is similar enough: this element starts fresh.
            elementa["copies"] = 1
            extremes.append(elementa )
    return extremes
def UpperCamelCase ( cluster_list , dataset , jaccard_threshold ):
    """Run `_find_cluster_extremes_shared` over every cluster in parallel.

    The dataset is published through the module-global ``_shared_dataset`` so
    forked pool workers can read it without pickling it per task. The
    obfuscated original declared one parameter name three times (a
    SyntaxError); names are restored from the body's references.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def UpperCamelCase ( dataset , jaccard_threshold = 0.85 ):
    """Deduplicate *dataset*: keep only the 'extreme' representative of each
    near-duplicate cluster, and annotate cluster members in place.

    Returns:
        (filtered_dataset, duplicate_clusters) — the filtered dataset plus the
        clusters with "is_extreme"/"copies" annotations added to each element.

    The obfuscated original declared one parameter name twice (a SyntaxError)
    and dropped most assignment targets; restored from the body's references.
    """
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    # Remove every duplicate that is not a kept extreme.
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['''base_index''']]['''copies''']
    print(f'Original dataset size: {len(dataset )}' )
    print(f'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(f'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(f'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(f'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
| 633 | 0 |
import argparse
from collections import defaultdict
import yaml
# Path of the English docs table of contents validated by this script.
lowercase__ :int = 'docs/source/en/_toctree.yml'
def UpperCamelCase ( model_doc ):
    """Clean one model-doc ToC section: merge duplicate `local` entries and
    return the entries sorted case-insensitively by title.

    Raises:
        ValueError: if a duplicated `local` appears with conflicting titles.

    Fixes from the obfuscated original: ``defaultdict`` was constructed with
    the input list as its default factory (crashes on ``+= 1``), and the sort
    lambda's parameter did not match the ``s`` used in its body.
    """
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def UpperCamelCase ( lowerCAmelCase__=False ):
    """Check the model section of the docs ToC is sorted; with overwrite=True,
    rewrite the file in place instead of raising.

    NOTE(review): obfuscation dropped assignment targets and the constant
    holding the ToC path — `open(lowerCAmelCase__ ...)` receives the
    `overwrite` flag where upstream opens `PATH_TO_TOC`; verify upstream.
    """
    with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
        lowercase = yaml.safe_load(f.read() )
    # Get to the API doc
    lowercase = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    lowercase = content[api_idx]['''sections''']
    # Then to the model doc
    lowercase = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    lowercase = api_doc[model_idx]['''sections''']
    # Collect only the per-modality subsections (those with nested sections).
    lowercase = [(idx, section) for idx, section in enumerate(lowerCAmelCase__ ) if '''sections''' in section]
    lowercase = False
    for idx, modality_doc in modalities_docs:
        lowercase = modality_doc['''sections''']
        lowercase = clean_model_doc_toc(lowerCAmelCase__ )
        if old_modality_doc != new_modality_doc:
            lowercase = True
            if overwrite:
                lowercase = new_modality_doc
    if diff:
        if overwrite:
            # Write the cleaned ToC back out.
            lowercase = model_doc
            lowercase = api_doc
            with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    # CLI: `--fix_and_overwrite` rewrites the ToC instead of failing.
    lowercase__ :List[str] = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    lowercase__ :Optional[int] = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 718 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
# Module logger (presumably bound as `logger` originally — read below).
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Safety checker with two linear heads over CLIP vision embeddings: one
    flags NSFW content, one flags watermarks; flagged images are blacked out.

    NOTE(review): obfuscation dropped the assignment targets — `self.vision_model`,
    `self.p_head`, `self.w_head` and the per-step locals (`nsfw_detected`,
    `watermark_detected`, ...) are read below but never bound; verify upstream.
    """
    lowercase_ : Union[str, Any] =CLIPConfig
    lowercase_ : str =['''CLIPEncoderLayer''']
    def __init__( self ,A__):
        super().__init__(A__)
        lowercase = CLIPVisionModelWithProjection(config.vision_config)
        lowercase = nn.Linear(config.vision_config.projection_dim ,1)
        lowercase = nn.Linear(config.vision_config.projection_dim ,1)
    @torch.no_grad()
    def A__ ( self ,A__ ,A__ ,A__=0.5 ,A__=0.5):
        # Embed the clip_input batch, score with both heads, and zero out any
        # image whose score exceeds the corresponding threshold.
        lowercase = self.vision_model(A__)[0]
        lowercase = self.p_head(A__)
        lowercase = nsfw_detected.flatten()
        lowercase = nsfw_detected > p_threshold
        lowercase = nsfw_detected.tolist()
        if any(A__):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, nsfw_detected_ in enumerate(A__):
            if nsfw_detected_:
                lowercase = np.zeros(images[idx].shape)
        lowercase = self.w_head(A__)
        lowercase = watermark_detected.flatten()
        lowercase = watermark_detected > w_threshold
        lowercase = watermark_detected.tolist()
        if any(A__):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, watermark_detected_ in enumerate(A__):
            if watermark_detected_:
                lowercase = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
| 633 | 0 |
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCamelCase ( matrix ):
    """Return True iff *matrix* equals its own conjugate transpose (Hermitian).

    The obfuscated original referenced an undefined name `matrix` while the
    parameter was called something else — the parameter is restored to match.
    """
    return np.array_equal(matrix , matrix.conjugate().T )
def UpperCamelCase ( a , v ):
    """Rayleigh quotient ``v* A v / (v* v)`` for matrix *a* and column vector *v*.

    For a Hermitian *a* the result is real. The obfuscated original declared
    the same parameter name twice (a SyntaxError); names restored from the
    body's references.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def UpperCamelCase ( ):
    """Self-test: check hermiticity and the Rayleigh quotient on two matrices.

    NOTE(review): the body calls `is_hermitian`/`rayleigh_quotient` and reads
    `a`/`v`, but the obfuscation renamed the definitions above and dropped the
    local assignment targets — presumably those were the original names.
    """
    lowercase = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    lowercase = np.array([[1], [2], [3]] )
    assert is_hermitian(lowerCAmelCase__ ), f'{a} is not hermitian.'
    print(rayleigh_quotient(lowerCAmelCase__ , lowerCAmelCase__ ) )
    lowercase = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(lowerCAmelCase__ ), f'{a} is not hermitian.'
    assert rayleigh_quotient(lowerCAmelCase__ , lowerCAmelCase__ ) == float(3 )
if __name__ == "__main__":
    # Run the module doctests, then the self-test above.
    import doctest
    doctest.testmod()
    tests()
| 719 |
class lowercase :
    """Node of a binary search tree used by the tree-sort below.

    Fixes from the obfuscated original: ``__init__`` discarded its value
    (assigning to a bare local instead of ``self.val``), and the insert
    method was defined under the obfuscated name ``A__`` while every call
    site (including its own recursion) invokes ``insert``.
    """

    def __init__( self , val ):
        self.val = val
        self.left = None
        self.right = None

    def insert( self , val ):
        """Insert *val* into the subtree rooted at this node, BST-ordered.
        Equal values overwrite in place (a no-op)."""
        # NOTE(review): a falsy stored value (0, None, "") takes the else
        # branch and is overwritten — quirk inherited from `if self.val:`.
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = lowercase(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = lowercase(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def UpperCamelCase ( root , res ):
    """Append the in-order traversal of the BST rooted at *root* to list *res*.

    The obfuscated original declared the same parameter name twice (a
    SyntaxError) and recursed via the undefined name `inorder`; recursion now
    targets this function's own (obfuscated) name so it is self-contained.
    """
    # Recursive traversal: left subtree, node, right subtree.
    if root:
        UpperCamelCase(root.left , res )
        res.append(root.val )
        UpperCamelCase(root.right , res )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Sort *arr* by inserting all elements into a BST and reading it back
    in-order.

    NOTE(review): the body references `Node`, `arr`, `root.insert` and
    `inorder`, but the obfuscation renamed those definitions/targets above —
    presumably the original names; verify against the upstream tree_sort.
    """
    # Build BST
    if len(lowerCAmelCase__ ) == 0:
        return arr
    lowercase = Node(arr[0] )
    for i in range(1 , len(lowerCAmelCase__ ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    lowercase = []
    inorder(lowerCAmelCase__ , lowerCAmelCase__ )
    return res
if __name__ == "__main__":
    # Demo: sort a small fixed list.
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 633 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( __lowerCamelCase , unittest.TestCase ):
    # Test suite for the LED tokenizer (slow and fast implementations).
    #
    # NOTE(review): this block is machine-obfuscated and not runnable as-is:
    # the base mixin `__lowerCamelCase` and the pervasive name
    # `SCREAMING_SNAKE_CASE_` are never defined in this module, every test
    # method is named `A__` (so later definitions shadow earlier ones), and
    # repeated `lowercase = ...` assignments shadow one another.  The code is
    # kept byte-identical; comments below only record the apparent intent.
    lowercase_ : Dict =LEDTokenizer
    lowercase_ : List[Any] =LEDTokenizerFast
    lowercase_ : Tuple =True
    # Builds a tiny BPE vocab/merges pair on disk for the tokenizers to load.
    def A__ ( self):
        super().setUp()
        lowercase = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        lowercase = dict(zip(SCREAMING_SNAKE_CASE_ ,range(len(SCREAMING_SNAKE_CASE_))))
        lowercase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowercase = {'''unk_token''': '''<unk>'''}
        lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''])
        lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE_) + '''\n''')
        with open(self.merges_file ,'''w''' ,encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_))
    # Factory for the slow tokenizer loaded from the temp dir.
    def A__ ( self ,**A__):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE_)
    # Factory for the fast (Rust) tokenizer loaded from the temp dir.
    def A__ ( self ,**A__):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE_)
    # Input/output text pair used by the shared tokenizer tests.
    def A__ ( self ,A__):
        return "lower newer", "lower newer"
    @cached_property
    def A__ ( self):
        # Pretrained slow tokenizer used by the integration tests below.
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''')
    @cached_property
    def A__ ( self):
        # Pretrained fast tokenizer counterpart.
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''')
    @require_torch
    def A__ ( self):
        # Batch encoding: shapes and first-row ids must match the expectation.
        lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        lowercase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = tokenizer(SCREAMING_SNAKE_CASE_ ,max_length=len(SCREAMING_SNAKE_CASE_) ,padding=SCREAMING_SNAKE_CASE_ ,return_tensors='''pt''')
            self.assertIsInstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_)
            self.assertEqual((2, 9) ,batch.input_ids.shape)
            self.assertEqual((2, 9) ,batch.attention_mask.shape)
            lowercase = batch.input_ids.tolist()[0]
            self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_)
    @require_torch
    def A__ ( self):
        # Plain call must yield input_ids/attention_mask but no label keys.
        lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = tokenizer(SCREAMING_SNAKE_CASE_ ,padding=SCREAMING_SNAKE_CASE_ ,return_tensors='''pt''')
            self.assertIn('''input_ids''' ,SCREAMING_SNAKE_CASE_)
            self.assertIn('''attention_mask''' ,SCREAMING_SNAKE_CASE_)
            self.assertNotIn('''labels''' ,SCREAMING_SNAKE_CASE_)
            self.assertNotIn('''decoder_attention_mask''' ,SCREAMING_SNAKE_CASE_)
    @require_torch
    def A__ ( self):
        # text_target with max_length padding produces fixed-width targets.
        lowercase = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = tokenizer(text_target=SCREAMING_SNAKE_CASE_ ,max_length=3_2 ,padding='''max_length''' ,return_tensors='''pt''')
            self.assertEqual(3_2 ,targets['''input_ids'''].shape[1])
    @require_torch
    def A__ ( self):
        # Truncation caps very long inputs at the model maximum length.
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = tokenizer(
                ['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] ,padding=SCREAMING_SNAKE_CASE_ ,truncation=SCREAMING_SNAKE_CASE_ ,return_tensors='''pt''')
            self.assertIsInstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_)
            self.assertEqual(batch.input_ids.shape ,(2, 5_1_2_2))
    @require_torch
    def A__ ( self):
        # Inputs and targets are both wrapped in BOS ... EOS special tokens.
        lowercase = ['''A long paragraph for summarization.''']
        lowercase = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = tokenizer(SCREAMING_SNAKE_CASE_ ,return_tensors='''pt''')
            lowercase = tokenizer(text_target=SCREAMING_SNAKE_CASE_ ,return_tensors='''pt''')
            lowercase = inputs['''input_ids''']
            lowercase = targets['''input_ids''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def A__ ( self):
        # LED-specific: global_attention_mask must be padded alongside inputs.
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase = ['''Summary of the text.''', '''Another summary.''']
            lowercase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowercase = tokenizer(SCREAMING_SNAKE_CASE_ ,padding=SCREAMING_SNAKE_CASE_)
            lowercase = [[0] * len(SCREAMING_SNAKE_CASE_) for x in encoded_output['''input_ids''']]
            lowercase = tokenizer.pad(SCREAMING_SNAKE_CASE_)
            self.assertSequenceEqual(outputs['''global_attention_mask'''] ,SCREAMING_SNAKE_CASE_)
    def A__ ( self):
        # Intentionally skipped shared test.
        pass
    def A__ ( self):
        # Slow and fast tokenizers must agree on text containing <mask>.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                lowercase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_)
                lowercase = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_)
                lowercase = '''A, <mask> AllenNLP sentence.'''
                lowercase = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ,return_token_type_ids=SCREAMING_SNAKE_CASE_)
                lowercase = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ,return_token_type_ids=SCREAMING_SNAKE_CASE_)
                self.assertEqual(sum(tokens_r['''token_type_ids''']) ,sum(tokens_p['''token_type_ids''']))
                self.assertEqual(
                    sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) ,sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) ,)
                lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
                lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                self.assertSequenceEqual(tokens_p['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(tokens_r['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(
                    SCREAMING_SNAKE_CASE_ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
                self.assertSequenceEqual(
                    SCREAMING_SNAKE_CASE_ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
| 720 |
import os
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
    """Project Euler 82 style minimal path sum.

    Reads a comma-separated grid from *lowerCAmelCase__* (resolved relative to
    this file) and returns the minimal sum of a path that starts anywhere in
    the left column, ends anywhere in the right column, and may move up, down
    or right.

    NOTE(review): the obfuscated original collapsed every local onto the name
    ``lowercase`` and then referenced the undefined names ``matrix``, ``rows``
    and ``minimal_path_sums``; the locals are restored here.
    """
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)) , lowerCAmelCase__)) as input_file:
        matrix = [
            [int(element) for element in line.split(''',''')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    # minimal_path_sums[i][j] = cheapest path ending at cell (i, j).
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols):
        # First assume a straight move from the left neighbour...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ...then relax downwards (paths arriving from above)...
        for i in range(1 , rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j])
        # ...and upwards (paths arriving from below).
        for i in range(rows - 2 , -1 , -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
    # NOTE(review): the obfuscated original called the undefined name
    # `solution`; the solver above is named `UpperCamelCase`.
    print(f"{UpperCamelCase() = }")
import math
from collections.abc import Callable
def UpperCamelCase ( function , x0 , x1 ):
    """Find a root of *function* by the secant method starting from x0, x1.

    Iterates until successive estimates differ by less than 1e-5.

    Raises:
        ZeroDivisionError: if the two estimates (or their images) coincide,
            which would make the secant slope undefined.

    NOTE(review): the obfuscated original declared all three parameters under
    one name (a SyntaxError) and shadowed the iterates; restored from the
    names the body referenced (``x_n``, ``x_na`` ...).
    """
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('''float division by zero, could not find root''')
        # Secant update: x_{n+2} = x_{n+1} - f(x_{n+1}) / slope.
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def UpperCamelCase ( lowerCAmelCase__ ):
    """Evaluate the polynomial f(x) = x**3 - 2*x - 5 (always returns a float)."""
    cubed = math.pow(lowerCAmelCase__ , 3)
    linear = 2 * lowerCAmelCase__
    return cubed - linear - 5
if __name__ == "__main__":
    # NOTE(review): both the secant solver and the polynomial above were
    # renamed `UpperCamelCase` by the obfuscation, so the names `intersection`
    # and `f` used here are undefined -- this call raises NameError as written.
    print(intersection(f, 3, 3.5))
from __future__ import annotations
def UpperCamelCase ( lowerCAmelCase__ ):
    """Check whether side lengths *lowerCAmelCase__* can close into a polygon.

    A polygon closes iff its longest side is strictly shorter than the sum of
    the others.  Returns True/False; the input list is not mutated.

    Raises:
        ValueError: for fewer than two sides or any non-positive length.

    NOTE(review): the obfuscated original referenced the undefined name
    ``nums``; all references now use the actual parameter.
    """
    if len(lowerCAmelCase__) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''')
    if any(i <= 0 for i in lowerCAmelCase__):
        raise ValueError('''All values must be greater than 0''')
    copy_nums = lowerCAmelCase__.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowercase__ :Tuple = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config URL.
# NOTE(review): the obfuscation reuses the name `lowercase__` for both the
# logger above and this map, so the logger binding is lost -- in the original
# these were `logger` and `*_PRETRAINED_CONFIG_ARCHIVE_MAP`.
lowercase__ :Any = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowercase ( PretrainedConfig ):
    """Configuration for a Speech2Text2 decoder model.

    Fixes applied to the obfuscated original:
    * the base class was ``lowercase__`` (bound to a dict at class-creation
      time) -- restored to ``PretrainedConfig`` imported at the top of file;
    * ``__init__`` declared every parameter as ``A__`` (duplicate parameter
      names are a SyntaxError) -- restored to the names implied by the
      attribute assignments in the body;
    * three class attributes were all bound to ``lowercase_`` (each shadowing
      the previous) -- restored to the names PretrainedConfig expects.
    """

    model_type = '''speech_to_text_2'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=1_0_0_0_0,
        decoder_layers=6,
        decoder_ffn_dim=2_0_4_8,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=2_5_6,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1_0_2_4,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # Mirrors the original double assignment: hidden layer count equals
        # the number of decoder layers.
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 700 |
def UpperCamelCase ( input_a , input_b ):
    """Logical AND gate: return 1 iff both inputs are non-zero, else 0.

    NOTE(review): the obfuscated original declared both parameters under the
    same name (a SyntaxError) and counted one input twice; both inputs now
    participate in the zero test.
    """
    return int((input_a, input_b).count(0) == 0)
def UpperCamelCase ( ):
    """Exercise the full AND-gate truth table."""
    # NOTE(review): `and_gate` is undefined in this module as written -- the
    # gate above was itself renamed `UpperCamelCase` by the obfuscation (and
    # is now shadowed by this definition), so these asserts raise NameError.
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # NOTE(review): `test_and_gate` and `and_gate` are undefined here (both
    # were obfuscated to `UpperCamelCase` above); this block raises NameError.
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
lowercase__ :List[Any] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazily-populated import structure for the BioGPT subpackage.
# NOTE(review): the obfuscated original bound the structure dict, the model
# list and the final _LazyModule to the same name `lowercase__` and then
# referenced the undefined `_import_structure`; restored to the standard
# transformers lazy-module pattern.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only configuration/tokenization symbols.
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy imports
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
from __future__ import annotations
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return True iff *lowerCAmelCase__* uses each digit 1-9 exactly once.

    NOTE(review): the obfuscated original referenced the undefined name
    ``lowerCamelCase_``; the string conversion is now bound to a local.
    """
    digits = str(lowerCAmelCase__)
    return len(digits) == 9 and set(digits) == set('''123456789''')
def UpperCamelCase ( ):
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Searches candidates of the forms ``100002 * base`` (4-digit base, i.e.
    concat of n and 2n) and ``1002003 * base`` (3-digit base, concat of n, 2n
    and 3n), scanning downwards so the first hit is the maximum.

    NOTE(review): the obfuscated original called the undefined names
    ``is_9_pandigital`` / ``lowerCamelCase_``; the pandigital check is inlined
    as a private helper so this block is self-contained.
    """
    def _is_9_pandigital(candidate):
        # Exactly the digits 1-9, each once.
        digits = str(candidate)
        return len(digits) == 9 and set(digits) == set('''123456789''')

    for base_num in range(9999 , 4999 , -1):
        candidate = 10_0002 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    for base_num in range(333 , 99 , -1):
        candidate = 100_2003 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    # NOTE(review): the obfuscated original called the undefined name
    # `solution`; the solver above is named `UpperCamelCase`.
    print(f"{UpperCamelCase() = }")
import logging
from transformers import PretrainedConfig
# Module-level logger for the BertAbs configuration file.
lowercase__ :int = logging.getLogger(__name__)
# Map from checkpoint name to its hosted config URL.
# NOTE(review): the obfuscation reuses `lowercase__` for both bindings, so the
# logger above is lost -- originally `logger` and the pretrained-config map.
lowercase__ :Dict = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( PretrainedConfig ):
    """Configuration for the BertAbs extractive/abstractive summarizer.

    Fixes applied to the obfuscated original:
    * the base class was the undefined name ``SCREAMING_SNAKE_CASE__`` --
      restored to ``PretrainedConfig`` imported at the top of the file;
    * ``__init__`` declared every parameter as ``A__`` (duplicate parameter
      names are a SyntaxError) -- restored to the encoder/decoder names
      implied by the attribute assignments in the body.
    """

    model_type = '''bertabs'''

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        max_pos=5_1_2,
        enc_layers=6,
        enc_hidden_size=5_1_2,
        enc_heads=8,
        enc_ff_size=5_1_2,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=7_6_8,
        dec_heads=8,
        dec_ff_size=2_0_4_8,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Encoder hyper-parameters.
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        # Decoder hyper-parameters.
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 633 | 0 |
from __future__ import annotations
def UpperCamelCase ( sequence , start = None , end = None ):
    """Sort *sequence* in place with the (deliberately inefficient) slowsort.

    *start*/*end* default to the whole list.  Returns None, like list.sort().

    NOTE(review): the obfuscated original declared all three parameters under
    one name (a SyntaxError), recursed via the undefined name ``slowsort`` and
    garbled the swap; restored from the names the body referenced.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # Recursively sort both halves.
    UpperCamelCase(sequence , start , mid)
    UpperCamelCase(sequence , mid + 1 , end)
    # Put the maximum of the range at `end` ...
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    # ... then slowsort everything except that maximum.
    UpperCamelCase(sequence , start , end - 1)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 703 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
# Deprecation shim: importing this script only emits a warning pointing users
# at the supported diffusers API.
warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
| 633 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCamelCase ( lowerCAmelCase__ ):
    """Convert an image to its colour negative, in place, and return it.

    Expects a numpy-style image array of shape (H, W, 3) with values in
    0..255 -- TODO confirm against the callers.

    NOTE(review): the obfuscated original bound the (height, width) tuple to a
    single name and looped over the undefined ``__SCREAMING_SNAKE_CASE``;
    restored here.
    """
    height, width = lowerCAmelCase__.shape[0], lowerCAmelCase__.shape[1]
    # Converting each pixel's colour to its negative.
    for i in range(height):
        for j in range(width):
            lowerCAmelCase__[i][j] = [255, 255, 255] - lowerCAmelCase__[i][j]
    return lowerCAmelCase__
if __name__ == "__main__":
    # NOTE(review): `convert_to_negative` is undefined as written -- the
    # function above was renamed `UpperCamelCase` by the obfuscation -- and
    # this block also requires an `image_data/lena.jpg` file on disk.
    # read original image
    lowercase__ :Tuple = imread("image_data/lena.jpg", 1)
    # convert to its negative
    lowercase__ :Optional[int] = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 704 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase ( shape , vocab_size , rng=None ):
    """Return a random int array of *shape* with values in [0, vocab_size).

    *rng* is an optional ``random.Random`` instance for reproducibility.

    NOTE(review): the obfuscated original declared all parameters under one
    name (a SyntaxError) and referenced undefined locals; restored here.  The
    original dtype was ``jnp.intaa`` (garbled) -- np.int32 is used, matching
    the usual flax test helper.
    """
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0 , vocab_size - 1))
    output = np.array(values , dtype=np.int32).reshape(shape)
    return output
def UpperCamelCase ( shape , rng=None ):
    """Return a random 0/1 attention mask of *shape*.

    The last position of every row is forced to 1 so each batch element
    attends to at least one token.

    NOTE(review): the obfuscated original declared duplicate parameters and
    called ``ids_tensor``, a name the obfuscation erased from this module;
    the id generation is inlined so the block is self-contained.
    """
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = [rng.randint(0 , 1) for _ in range(total_dims)]
    attn_mask = np.array(values , dtype=np.int32).reshape(shape)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowercase :
    # Mixin exercising Flax `generate()` across greedy / sampling / beam
    # configurations, plus a PyTorch cross-check.
    #
    # NOTE(review): machine-obfuscated and not runnable as-is: every test
    # method is named `A__` (later defs shadow earlier ones) and tuple
    # unpackings repeatedly bind all results to the single name `lowercase`,
    # so most locals referenced below are lost.  Code kept byte-identical;
    # comments record the apparent intent only.
    lowercase_ : Any =None
    lowercase_ : List[str] =()
    # Builds (config, input_ids, attention_mask, max_length) from the host
    # test-case's model tester; trims to batch 3 and half sequence length.
    def A__ ( self):
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        lowercase = 2
        lowercase = inputs['''input_ids'''].shape[-1] // 2
        lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        lowercase = jnp.ones_like(A__)
        lowercase = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        lowercase = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            lowercase = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def A__ ( self):
        # Greedy generation must match the equivalent PyTorch model's output.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 0
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            lowercase = getattr(A__ ,A__)
            lowercase = pt_model_class(A__).eval()
            lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
            lowercase = flax_model.generate(A__).sequences
            lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())
    def A__ ( self):
        # Greedy generation: eager and jitted outputs must agree.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Sampling generation: eager vs jitted agreement.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Beam search: eager vs jitted agreement.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # num_return_sequences multiplies the output batch size.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)
    def A__ ( self):
        # Sampling with temperature / top_k / top_p and forced bos/eos ids.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        lowercase = 0.8
        lowercase = 1_0
        lowercase = 0.3
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Greedy with forced bos/eos token ids.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Beam search with forced bos/eos token ids.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 2
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Greedy generation with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Sampling generation with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
    def A__ ( self):
        # Beam search with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = 2
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
    # Integration test: `generate()` must reject unknown keyword arguments
    # with a helpful error (catches typos such as `do_samples`).
    def A__ ( self):
        lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        lowercase = '''Hello world'''
        lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(A__ ,'''do_samples'''):
            model.generate(A__ ,do_samples=A__)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(A__ ,'''foo'''):
            lowercase = {'''foo''': '''bar'''}
            model.generate(A__ ,**A__)
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazily-populated import structure for the SEW subpackage.
# NOTE(review): the obfuscated original bound the structure dict, the model
# list and the final _LazyModule to the same name `lowercase__` and then
# referenced the undefined `_import_structure`; restored to the standard
# transformers lazy-module pattern.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only configuration symbols.
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy imports
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[str] =(UniPCMultistepScheduler,)
lowercase_ : Tuple =(('''num_inference_steps''', 25),)
def A__ ( self ,**A__):
lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**A__)
return config
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
new_scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase = sample, sample
for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
# copy over dummy past residuals
new_scheduler.set_timesteps(A__)
# copy over dummy past residual (must be after setting timesteps)
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=None ,**A__):
if scheduler is None:
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
return sample
def A__ ( self):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
scheduler.set_timesteps(A__)
elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
lowercase = scheduler.timesteps[5]
lowercase = scheduler.timesteps[6]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
self.assertEqual(output_a.shape ,sample.shape)
self.assertEqual(output_a.shape ,output_a.shape)
def A__ ( self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
lowercase = DEISMultistepScheduler.from_config(scheduler.config)
lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
def A__ ( self):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__)
def A__ ( self):
self.check_over_configs(thresholding=A__)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)
def A__ ( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__)
def A__ ( self):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
lowercase = self.full_loop(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
assert not torch.isnan(A__).any(), "Samples have nan numbers"
def A__ ( self):
self.check_over_configs(lower_order_final=A__)
self.check_over_configs(lower_order_final=A__)
def A__ ( self):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=A__ ,time_step=0)
def A__ ( self):
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
def A__ ( self):
lowercase = self.full_loop(prediction_type='''v_prediction''')
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.1014) < 1E-3
def A__ ( self):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
assert sample.dtype == torch.floataa
def A__(self, **config):
    """`set_timesteps` over the full training schedule must yield unique steps."""
    for scheduler_class in self.scheduler_classes:
        scheduler_config = self.get_scheduler_config(**config)
        # NOTE(review): the scheduler must be built from the config dict, not
        # from the raw kwargs as the obfuscated source did.
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(scheduler.config.num_train_timesteps)
        assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowercase__ :Any = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config.json URLs.
lowercase__ :List[Any] = {
    'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
    'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class lowercase(PretrainedConfig):
    """Configuration for a MarkupLM model: a BERT-like text encoder extended
    with XPath tag/subscript embeddings for HTML structure.

    NOTE(review): the obfuscated source declared every ``__init__`` parameter
    as ``A__`` (a SyntaxError) while the body read the names below, and used
    the undefined base class ``__lowercase``; names restored from the body and
    the defaults' order. The base is the ``PretrainedConfig`` imported above.
    """

    # NOTE(review): presumably the mangled `model_type` class attribute —
    # restored so the config registers under its model family.
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (XPath embedding geometry)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 706 |
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase(lowerCAmelCase__):
    """De-duplicate and alphabetically sort a "Models" toc section.

    `lowerCAmelCase__` is a list of ``{"local": ..., "title": ...}`` dicts.
    Raises ValueError when one ``local`` appears with conflicting titles.
    """
    model_doc = lowerCAmelCase__
    # NOTE(review): the obfuscated source passed the doc list as the
    # defaultdict factory; counting requires `int`.
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
    # Sort (the obfuscated lambda bound `lowerCAmelCase__` but read `s`)
    return sorted(new_doc, key=lambda s: s["title"].lower())
def UpperCamelCase(lowerCAmelCase__=False):
    """Check (and, when the flag is True, fix) the sorting of the model toc.

    `lowerCAmelCase__` is the ``overwrite`` flag; the toc path is the module
    constant defined above.
    """
    overwrite = lowerCAmelCase__
    # NOTE(review): the obfuscated source opened the boolean flag as a file;
    # the toc file path is the `lowercase__` constant above.
    with open(lowercase__, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        # NOTE(review): relies on the cleaning helper defined above; upstream
        # it is named `clean_model_doc_toc` — confirm against this file's defs.
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(lowercase__, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    # NOTE(review): the checker above is defined as `UpperCamelCase` in this
    # file (upstream name: `check_model_doc`); the obfuscated call did not
    # resolve to any defined name.
    UpperCamelCase(args.fix_and_overwrite)
| 633 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration file.
lowercase__ :Optional[int] = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config.json URLs.
lowercase__ :Optional[Any] = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class lowercase(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a BiT (Big Transfer) ResNet-style backbone.

    NOTE(review): the obfuscated source declared every ``__init__`` parameter
    as ``A__`` (a SyntaxError) and used undefined base classes; the body reads
    ``layer_type``, ``self.layer_types`` and ``self.supported_padding``, which
    pins down the restored parameter and class-attribute names. Bases are the
    mixin/config classes imported above.
    """

    # NOTE(review): presumably the mangled `model_type` — restored.
    model_type = "bit"
    # Valid residual layer flavours, checked in __init__.
    layer_types = ["preactivation", "bottleneck"]
    # Valid global convolution padding strategies, checked in __init__.
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # Stage names drive backbone feature selection below.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 707 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase(func, a, precision=10**-10):
    """Find a root of `func` — an expression string in the variable ``x`` —
    via the Newton-Raphson method, starting from the estimate ``a``.

    NOTE(review): parameter names restored; the obfuscated source declared
    three parameters with the same name, which is a SyntaxError.
    """
    x = a  # eval(func) below reads the current estimate through the name `x`
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
    # NOTE(review): the solver above is defined as `UpperCamelCase` in this
    # file (upstream name: `newton_raphson`); calls updated to resolve.
    # Find root of trigonometric function (value of pi)
    print(f"The root of sin(x) = 0 is {UpperCamelCase('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {UpperCamelCase('x**2 - 5*x + 2', 0.4)}")
    # Find Square Root of 5
    print(f"The root of log(x) - 1 = 0 is {UpperCamelCase('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {UpperCamelCase('exp(x) - 1', 0)}")
| 633 | 0 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowercase__ :int = TypeVar("KEY")
lowercase__ :Any = TypeVar("VAL")
@dataclass(frozen=A_ , slots=A_ )
class lowercase ( Generic[KEY, VAL] ):
lowercase_ : KEY
lowercase_ : VAL
class lowercase ( _Item ):
def __init__( self):
super().__init__(A__ ,A__)
def __bool__( self):
return False
lowercase__ :List[str] = _DeletedItem()
class lowercase ( MutableMapping[KEY, VAL] ):
def __init__( self ,A__ = 8 ,A__ = 0.75):
lowercase = initial_block_size
lowercase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowercase = capacity_factor
lowercase = 0
def A__ ( self ,A__):
return hash(A__) % len(self._buckets)
def A__ ( self ,A__):
return (ind + 1) % len(self._buckets)
def A__ ( self ,A__ ,A__ ,A__):
lowercase = self._buckets[ind]
if not stored:
lowercase = _Item(A__ ,A__)
self._len += 1
return True
elif stored.key == key:
lowercase = _Item(A__ ,A__)
return True
else:
return False
def A__ ( self):
lowercase = len(self._buckets) * self._capacity_factor
return len(self) >= int(A__)
def A__ ( self):
if len(self._buckets) <= self._initial_block_size:
return False
lowercase = len(self._buckets) * self._capacity_factor / 2
return len(self) < limit
def A__ ( self ,A__):
lowercase = self._buckets
lowercase = [None] * new_size
lowercase = 0
for item in old_buckets:
if item:
self._add_item(item.key ,item.val)
def A__ ( self):
self._resize(len(self._buckets) * 2)
def A__ ( self):
self._resize(len(self._buckets) // 2)
def A__ ( self ,A__):
lowercase = self._get_bucket_index(A__)
for _ in range(len(self._buckets)):
yield ind
lowercase = self._get_next_ind(A__)
def A__ ( self ,A__ ,A__):
for ind in self._iterate_buckets(A__):
if self._try_set(A__ ,A__ ,A__):
break
def __setitem__( self ,A__ ,A__):
if self._is_full():
self._size_up()
self._add_item(A__ ,A__)
def __delitem__( self ,A__):
for ind in self._iterate_buckets(A__):
lowercase = self._buckets[ind]
if item is None:
raise KeyError(A__)
if item is _deleted:
continue
if item.key == key:
lowercase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self ,A__):
for ind in self._iterate_buckets(A__):
lowercase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(A__)
def __len__( self):
return self._len
def __iter__( self):
yield from (item.key for item in self._buckets if item)
def __repr__( self):
lowercase = """ ,""".join(
f'{item.key}: {item.val}' for item in self._buckets if item)
return f'HashMap({val_string})'
| 708 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase(lowerCAmelCase__):
    """Convert an (H, W, 3) RGB array to (H, W) grayscale (ITU-R 601 weights).

    NOTE(review): the obfuscated source unpacked the channels into `lowercase`
    three times and then read undefined `r`, `g`, `b`; names restored.
    """
    rgb = lowerCAmelCase__
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def UpperCamelCase(lowerCAmelCase__):
    """Threshold a grayscale array into a boolean mask (values in (127, 255])."""
    gray = lowerCAmelCase__  # the obfuscated body read undefined `gray`
    return (gray > 127) & (gray <= 255)
def UpperCamelCase(image, kernel):
    """Morphological dilation of a binary image with a binary kernel.

    NOTE(review): the obfuscated source declared both parameters with the same
    name (a SyntaxError) and lost the padded-copy slice target; restored so
    the image sits centered in the zero-padded working array.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # read original image
    lowercase__ :str = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lowercase__ :List[str] = np.array(Image.open(lena_path))
    # kernel to be applied
    lowercase__ :Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    # NOTE(review): `lena_path`, `lena`, `structuring_element`, `output` and the
    # functions `dilation`/`gray_to_binary`/`rgb_to_gray` do not resolve in
    # this file as written (the defs above are all named `UpperCamelCase` and
    # the constants `lowercase__`) — TODO confirm intended names upstream.
    lowercase__ :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    lowercase__ :str = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 633 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger for this image-processor file.
lowercase__ :Optional[Any] = logging.get_logger(__name__)
# PIL is only needed when vision extras are installed (resampling enums, RGB conversion).
if is_vision_available():
    import PIL
class lowercase(BaseImageProcessor):
    """CLIP-style image processor: optional RGB conversion, shortest-edge
    resize, center crop, rescale to [0, 1] and channel normalization.

    NOTE(review): the obfuscated source declared every parameter as ``A__``
    (a SyntaxError) while bodies read ``UpperCamelCase__``; names restored
    from the defaults and the ``self.resize``/``self.center_crop``/
    ``self.rescale``/``self.normalize`` calls inside ``preprocess``. The base
    ``__A`` was undefined; ``BaseImageProcessor`` is imported above.
    """

    # NOTE(review): presumably the mangled `model_input_names` — restored.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the image's shortest edge matches size["shortest_edge"]."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level transform imported above.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize with per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline over one image or a list of images;
        per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 709 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase(lowerCAmelCase__):
    """Drop fairseq-only keys from a state dict, in place; missing keys are ignored."""
    state_dict = lowerCAmelCase__
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # NOTE(review): pop the loop key with a None default — the obfuscated
        # source popped the whole-dict argument name instead of `k`.
        state_dict.pop(k, None)
def UpperCamelCase(lowerCAmelCase__):
    """Build a bias-free Linear layer sharing an Embedding's weight tensor."""
    emb = lowerCAmelCase__
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Tie weights: the linear layer reuses the embedding's data tensor.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def UpperCamelCase(lowerCAmelCase__):
    """Load a fairseq M2M100 checkpoint from disk and build the equivalent
    Hugging Face `MaMaaaForConditionalGeneration` model."""
    checkpoint_path = lowerCAmelCase__
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    # NOTE(review): relies on the key-stripping helper above; upstream it is
    # named `remove_ignore_keys_` — confirm against this file's defs.
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )
    # Shared embeddings come from the decoder side of the checkpoint.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # NOTE(review): fixed the stray 'ß' in `args.fairseq_pathß`; the converter
    # above is defined as `UpperCamelCase` in this file (upstream name:
    # `convert_fairseq_mamaaa_checkpoint_from_disk`).
    model = UpperCamelCase(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 0 |
from manim import *
class lowercase ( a__ ):
    # NOTE(review): this is a manim Scene illustrating checkpoint-shard loading
    # (from the accelerate docs animations). The base `a__` and the ubiquitous
    # `lowerCamelCase_` arguments do not resolve as written — presumably the
    # mangled `Scene` class and manim direction constants (RIGHT/DOWN/...);
    # TODO confirm against the upstream animation source before running.
    def A__ ( self):
        # Unit memory cells: outer outline and slightly smaller fill rectangle.
        lowercase = Rectangle(height=0.5 ,width=0.5)
        lowercase = Rectangle(height=0.46 ,width=0.46).set_stroke(width=0)
        # Two columns of six cells forming the CPU block.
        lowercase = [mem.copy() for i in range(6)]
        lowercase = [mem.copy() for i in range(6)]
        lowercase = VGroup(*lowerCamelCase_).arrange(lowerCamelCase_ ,buff=0)
        lowercase = VGroup(*lowerCamelCase_).arrange(lowerCamelCase_ ,buff=0)
        lowercase = VGroup(lowerCamelCase_ ,lowerCamelCase_).arrange(lowerCamelCase_ ,buff=0)
        lowercase = Text('''CPU''' ,font_size=2_4)
        lowercase = Group(lowerCamelCase_ ,lowerCamelCase_).arrange(lowerCamelCase_ ,buff=0.5 ,aligned_edge=lowerCamelCase_)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(lowerCamelCase_)
        # GPU block: four cells.
        lowercase = [mem.copy() for i in range(4)]
        lowercase = VGroup(*lowerCamelCase_).arrange(lowerCamelCase_ ,buff=0)
        lowercase = Text('''GPU''' ,font_size=2_4)
        lowercase = Group(lowerCamelCase_ ,lowerCamelCase_).arrange(lowerCamelCase_ ,buff=0.5 ,aligned_edge=lowerCamelCase_)
        gpu.move_to([-1, -1, 0])
        self.add(lowerCamelCase_)
        # Model block: six cells.
        lowercase = [mem.copy() for i in range(6)]
        lowercase = VGroup(*lowerCamelCase_).arrange(lowerCamelCase_ ,buff=0)
        lowercase = Text('''Model''' ,font_size=2_4)
        lowercase = Group(lowerCamelCase_ ,lowerCamelCase_).arrange(lowerCamelCase_ ,buff=0.5 ,aligned_edge=lowerCamelCase_)
        model.move_to([3, -1.0, 0])
        self.add(lowerCamelCase_)
        # Small target rectangles marking where model cells map onto CPU cells.
        lowercase = []
        for i, rect in enumerate(lowerCamelCase_):
            rect.set_stroke(lowerCamelCase_)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            lowercase = Rectangle(height=0.46 / 4 ,width=0.46 / 3).set_stroke(width=0.0).set_fill(lowerCamelCase_ ,opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) ,buff=0.02 ,direction=lowerCamelCase_)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] ,direction=lowerCamelCase_ ,buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1] ,direction=lowerCamelCase_ ,buff=0.0)
            self.add(lowerCamelCase_)
            cpu_targs.append(lowerCamelCase_)
        # Loaded-checkpoint block: six cells plus caption.
        lowercase = [mem.copy() for i in range(6)]
        lowercase = VGroup(*lowerCamelCase_).arrange(lowerCamelCase_ ,buff=0)
        lowercase = Text('''Loaded Checkpoint''' ,font_size=2_4)
        lowercase = Group(lowerCamelCase_ ,lowerCamelCase_).arrange(lowerCamelCase_ ,aligned_edge=lowerCamelCase_ ,buff=0.4)
        checkpoint.move_to([3, 0.5, 0])
        # Legend key with colored bullet descriptions.
        lowercase = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        lowercase = MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=1_8 ,)
        key_text.move_to([-5, 2.4, 0])
        self.add(lowerCamelCase_ ,lowerCamelCase_)
        lowercase = MarkupText(
            f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=1_8 ,)
        blue_text.next_to(lowerCamelCase_ ,DOWN * 2.4 ,aligned_edge=key_text.get_left())
        # Step caption describing the animation.
        lowercase = MarkupText(
            f'Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.' ,font_size=2_4 ,)
        step_a.move_to([2, 2, 0])
        self.play(Write(lowerCamelCase_) ,Write(lowerCamelCase_))
        self.play(Write(lowerCamelCase_ ,run_time=1) ,Create(lowerCamelCase_ ,run_time=1))
        # Animate each checkpoint cell growing, then moving onto its CPU slot.
        lowercase = []
        lowercase = []
        for i, rect in enumerate(lowerCamelCase_):
            lowercase = fill.copy().set_fill(lowerCamelCase_ ,opacity=0.7)
            target.move_to(lowerCamelCase_)
            first_animations.append(GrowFromCenter(lowerCamelCase_ ,run_time=1))
            lowercase = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(lowerCamelCase_ ,run_time=1.5))
        self.play(*lowerCamelCase_)
        self.play(*lowerCamelCase_)
        self.wait()
| 710 |
from __future__ import annotations
from random import random
class Node:
    """Treap node: binary-search-tree by `value`, max-heap by random `prior`.

    NOTE(review): renamed from the obfuscated `lowercase` — the functions
    below construct nodes through the name `Node`.
    """

    def __init__(self, value=None):
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root, value):
    """Split a treap into (keys < value, keys >= value).

    NOTE(review): function names in this section restored to the names the
    bodies already call (`split`, `merge`, ...); the obfuscated file defined
    them all as `UpperCamelCase`, leaving every recursive call unresolved.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left, right):
    """Merge two treaps where every key of `left` is below every key of `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root, value):
    """Insert `value` into the treap, returning the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root, value):
    """Remove every node holding `value`, returning the new root."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root):
    """Print the treap's values in sorted order, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root, args):
    """Apply a whitespace-separated command string ("+5 -3") to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main():
    """Interactive treap REPL: numbers/+v insert, -v erase, 'q' quits.

    NOTE(review): renamed from the obfuscated `UpperCamelCase` so the
    `__main__` guard's `main()` call below resolves.
    """
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 633 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazily-imported module structure; backend-specific entries are appended
# below only when the corresponding dependency is installed.
# NOTE(review): restored the `_import_structure` name (referenced at module
# end) and the keyed appends — the obfuscated file rebound `lowercase__`
# instead, which `_LazyModule` never saw.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
def UpperCamelCase(lowerCAmelCase__=1000):
    """Return the largest product a*b*c over Pythagorean triplets with a+b+c == n
    (Project Euler style); 0 when candidates were examined but none qualified,
    -1 when the loop never ran.

    NOTE(review): local names restored — the obfuscated source referenced
    `n`, `b`, `c`, `candidate` and `product` without ever defining them.
    """
    n = lowerCAmelCase__
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
        if candidate >= product:
            product = candidate
    return product
if __name__ == "__main__":
    # NOTE(review): call fixed to the function actually defined above
    # (obfuscated name `UpperCamelCase`; upstream name `solution`).
    print(f"{UpperCamelCase() = }")
| 633 | 0 |
def UpperCamelCase(lowerCAmelCase__):
    """Return the two's-complement binary string of a non-positive integer.

    >>> UpperCamelCase(-1)
    '0b11'
    >>> UpperCamelCase(-5)
    '0b1011'
    >>> UpperCamelCase(0)
    '0b0'
    """
    # NOTE(review): the obfuscated body mixed the undefined names `number`
    # and `__lowerCAmelCase` for the single parameter; unified here.
    number = lowerCAmelCase__
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        ("1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number)
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 712 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazily-imported module structure; torch-only entries are appended below
# when torch is installed.
# NOTE(review): restored the `_import_structure` name and the keyed append —
# the obfuscated file rebound `lowercase__` instead, which `_LazyModule`
# never saw.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :Any = logging.get_logger(__name__)
lowercase__ :Tuple = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowercase ( PretrainedConfig ):
    """Configuration for WavLM models.

    Stores the hyper-parameters of the convolutional feature extractor, the
    transformer encoder, SpecAugment masking, the pre-training quantizer and
    the optional adapter / classification / x-vector heads.
    """

    # `model_type` identifier consumed by the Auto* machinery.
    lowercase_ : Optional[Any] ='''wavlm'''

    def __init__(
        self,
        vocab_size=3_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(1_0, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=1_2_8,
        num_conv_pos_embedding_groups=1_6,
        num_buckets=3_2_0,
        max_bucket_distance=8_0_0,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=1_0,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=1_0,
        num_codevectors_per_group=3_2_0,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=1_0_0,
        codevector_dim=2_5_6,
        proj_codevector_dim=2_5_6,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_5_6,
        tdnn_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=5_1_2,
        num_ctc_classes=8_0,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs ,pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id)
        # Feature-extractor (1-D conv stack) parameters.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        # Transformer-encoder parameters.
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def A__ ( self):
        """Overall downsampling ratio of the feature extractor (product of conv strides)."""
        return functools.reduce(operator.mul ,self.conv_stride ,1)
| 713 |
from numpy import exp, pi, sqrt
def UpperCamelCase ( x , mu = 0.0 , sigma = 1.0 ):
    '''Return the Gaussian (normal) probability density evaluated at `x`.

    `mu` is the mean and `sigma` the standard deviation; works elementwise on
    numpy arrays as well as on plain scalars.
    '''
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 633 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Tensor framework requested via `return_tensors` in these tests:
# prefer torch ("pt"), then tensorflow ("tf"), falling back to jax.
if is_torch_available():
    lowercase__ :Dict = "pt"
elif is_tf_available():
    lowercase__ :List[Any] = "tf"
else:
    lowercase__ :int = "jax"
class lowercase ( _a , unittest.TestCase ):
    """Tokenization tests for the byte-level PerceiverTokenizer.

    NOTE(review): identifiers here look machine-mangled — every method is named
    `A__` (so later defs shadow earlier ones), results are bound to a throwaway
    `lowercase` local, and `_A`/`_a` are unresolved placeholders. The comments
    below describe the apparent intent; confirm against the original test file.
    """

    # Tokenizer class under test / rust-tokenizer availability flag (presumably).
    lowercase_ : Union[str, Any] =PerceiverTokenizer
    lowercase_ : List[str] =False

    def A__ ( self):
        # Save a freshly constructed tokenizer into the shared tmp dir.
        super().setUp()
        lowercase = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def A__ ( self):
        # Canonical pretrained checkpoint used by the round-trip tests.
        return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''')

    def A__ ( self ,**A__):
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_A)

    def A__ ( self ,A__ ,A__=False ,A__=2_0 ,A__=5):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        lowercase = []
        for i in range(len(_A)):
            try:
                lowercase = tokenizer.decode([i] ,clean_up_tokenization_spaces=_A)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        lowercase = list(filter(lambda A__: re.match(r'''^[ a-zA-Z]+$''' ,t[1]) ,_A))
        lowercase = list(filter(lambda A__: [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=_A) ,_A))
        if max_length is not None and len(_A) > max_length:
            lowercase = toks[:max_length]
        if min_length is not None and len(_A) < min_length and len(_A) > 0:
            while len(_A) < min_length:
                lowercase = toks + toks
        # toks_str = [t[1] for t in toks]
        lowercase = [t[0] for t in toks]
        # Ensure consistency
        lowercase = tokenizer.decode(_A ,clean_up_tokenization_spaces=_A)
        if " " not in output_txt and len(_A) > 1:
            lowercase = (
                tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=_A)
                + ''' '''
                + tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=_A)
            )
        if with_prefix_space:
            lowercase = ''' ''' + output_txt
        lowercase = tokenizer.encode(_A ,add_special_tokens=_A)
        return output_txt, output_ids

    def A__ ( self):
        # Round-trip multibyte (UTF-8) input through encode/decode.
        lowercase = self.perceiver_tokenizer
        lowercase = '''Unicode €.'''
        lowercase = tokenizer(_A)
        lowercase = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded['''input_ids'''] ,_A)
        # decoding
        lowercase = tokenizer.decode(_A)
        self.assertEqual(_A ,'''[CLS]Unicode €.[SEP]''')
        lowercase = tokenizer('''e è é ê ë''')
        lowercase = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded['''input_ids'''] ,_A)
        # decoding
        lowercase = tokenizer.decode(_A)
        self.assertEqual(_A ,'''[CLS]e è é ê ë[SEP]''')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')) ,'''[CLS]e è é ê ë[SEP]''')

    def A__ ( self):
        # Batch tokenization with padding: exact ids and shapes.
        lowercase = self.perceiver_tokenizer
        lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        lowercase = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        lowercase = tokenizer(_A ,padding=_A ,return_tensors=_A)
        self.assertIsInstance(_A ,_A)
        if FRAMEWORK != "jax":
            lowercase = list(batch.input_ids.numpy()[0])
        else:
            lowercase = list(batch.input_ids.tolist()[0])
        self.assertListEqual(_A ,_A)
        self.assertEqual((2, 3_8) ,batch.input_ids.shape)
        self.assertEqual((2, 3_8) ,batch.attention_mask.shape)

    def A__ ( self):
        # Encoder-only tokenizer must not emit decoder-side keys.
        lowercase = self.perceiver_tokenizer
        lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        lowercase = tokenizer(_A ,padding=_A ,return_tensors=_A)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' ,_A)
        self.assertIn('''attention_mask''' ,_A)
        self.assertNotIn('''decoder_input_ids''' ,_A)
        self.assertNotIn('''decoder_attention_mask''' ,_A)

    def A__ ( self):
        # `text_target` path honours max_length padding.
        lowercase = self.perceiver_tokenizer
        lowercase = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        lowercase = tokenizer(
            text_target=_A ,max_length=3_2 ,padding='''max_length''' ,truncation=_A ,return_tensors=_A)
        self.assertEqual(3_2 ,targets['''input_ids'''].shape[1])

    def A__ ( self):
        # safety check on max_len default value so we are sure the test works
        lowercase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                self.assertNotEqual(tokenizer.model_max_length ,4_2)
        # Now let's start the test
        lowercase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # Isolate this from the other tests because we save additional tokens/etc
                lowercase = tempfile.mkdtemp()
                lowercase = ''' He is very happy, UNwant\u00E9d,running'''
                lowercase = tokenizer.encode(_A ,add_special_tokens=_A)
                tokenizer.save_pretrained(_A)
                lowercase = tokenizer.__class__.from_pretrained(_A)
                lowercase = after_tokenizer.encode(_A ,add_special_tokens=_A)
                self.assertListEqual(_A ,_A)
                shutil.rmtree(_A)
        lowercase = self.get_tokenizers(model_max_length=4_2)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # Isolate this from the other tests because we save additional tokens/etc
                lowercase = tempfile.mkdtemp()
                lowercase = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''])
                lowercase = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''')
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
                lowercase = tokenizer.encode(_A ,add_special_tokens=_A)
                tokenizer.save_pretrained(_A)
                lowercase = tokenizer.__class__.from_pretrained(_A)
                lowercase = after_tokenizer.encode(_A ,add_special_tokens=_A)
                self.assertListEqual(_A ,_A)
                self.assertIn('''new_additional_special_token''' ,after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length ,4_2)
                lowercase = tokenizer.__class__.from_pretrained(_A ,model_max_length=4_3)
                self.assertEqual(tokenizer.model_max_length ,4_3)
                shutil.rmtree(_A)

    def A__ ( self):
        # Round-trip of additional_special_tokens via the saved JSON config files.
        lowercase = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_A)
                with open(os.path.join(_A ,'''special_tokens_map.json''') ,encoding='''utf-8''') as json_file:
                    lowercase = json.load(_A)
                with open(os.path.join(_A ,'''tokenizer_config.json''') ,encoding='''utf-8''') as json_file:
                    lowercase = json.load(_A)
                lowercase = [f'<extra_id_{i}>' for i in range(1_2_5)]
                lowercase = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                lowercase = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(_A ,'''special_tokens_map.json''') ,'''w''' ,encoding='''utf-8''') as outfile:
                    json.dump(_A ,_A)
                with open(os.path.join(_A ,'''tokenizer_config.json''') ,'''w''' ,encoding='''utf-8''') as outfile:
                    json.dump(_A ,_A)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                lowercase = tokenizer_class.from_pretrained(
                    _A ,)
                self.assertIn(
                    '''an_additional_special_token''' ,tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ['''an_additional_special_token'''] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])) ,)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                lowercase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' ,lstrip=_A)]
                lowercase = tokenizer_class.from_pretrained(
                    _A ,additional_special_tokens=_A ,)
                self.assertIn('''a_new_additional_special_token''' ,tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['''a_new_additional_special_token'''] ,tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])) ,)

    def A__ ( self):
        # A lone continuation byte (id 178) decodes to the replacement character.
        lowercase = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8]) ,'''�''')

    def A__ ( self):
        # Intentionally disabled common test (not applicable to byte tokenizers).
        pass

    def A__ ( self):
        # Intentionally disabled common test (not applicable to byte tokenizers).
        pass

    def A__ ( self):
        # Intentionally disabled common test (not applicable to byte tokenizers).
        pass

    def A__ ( self):
        # Intentionally disabled common test (not applicable to byte tokenizers).
        pass

    def A__ ( self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        lowercase = self.get_tokenizers(fast=_A ,do_lower_case=_A)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                lowercase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
                lowercase = tokenizer.convert_tokens_to_string(_A)
                self.assertIsInstance(_A ,_A)
| 714 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env ( key , default=False ):
    '''Read a boolean flag from the environment variable `key`.

    Returns `default` when the variable is unset; otherwise maps truthy values
    ("y"/"yes"/"t"/"true"/"on"/"1") to 1 and falsy values to 0, raising
    ValueError for anything else — mirroring the legacy
    `distutils.util.strtobool` contract (distutils was removed in Python 3.12).
    '''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    # KEY is set, convert it to True (1) or False (0).
    normalized = value.strip().lower()
    if normalized in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if normalized in ("n", "no", "f", "false", "off", "0"):
        return 0
    # More values are supported, but let's keep the message simple.
    raise ValueError(f'If set, {key} must be yes or no.' )
# Module-level switch read by the slow-test decorator below: slow tests run
# only when the RUN_SLOW environment flag is set.
lowercase__ :Dict = parse_flag_from_env("RUN_SLOW", default=False)
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Unconditionally skip the wrapped test case.'''
    skip_decorator = unittest.skip('''Test was skipped''' )
    return skip_decorator(lowerCAmelCase__ )
# NOTE(review): every decorator below shares the name `UpperCamelCase`, so each
# definition shadows the previous one at module scope — these look like
# mangled renames of slow/require_cpu/require_cuda/...; confirm intended names.
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator marking a test as slow: skipped unless the RUN_SLOW env flag is set.'''
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: run the test only when no CUDA device is present (CPU-only).'''
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless a CUDA GPU is available.'''
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless an XPU device is available.'''
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless torch has Apple `mps` backend support.'''
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless both transformers and datasets are installed.'''
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless bitsandbytes is installed.'''
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless a TPU is available.'''
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless exactly one CUDA GPU is visible.'''
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless exactly one XPU is visible.'''
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless more than one CUDA GPU is visible.'''
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless more than one XPU is visible.'''
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless safetensors is installed.'''
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless DeepSpeed is installed.'''
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless the installed torch version is at least 1.12.0.'''
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__=None , version=None ):
    '''Decorator: skip the test unless the installed torch version is >= `version`.

    Supports both `@decorator(version="1.12")` (first call receives no test
    case, so a `partial` of itself is returned) and direct decoration.
    '''
    if lowerCAmelCase__ is None:
        return partial(UpperCamelCase , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f'test requires torch version >= {version}' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless tensorboard is installed.'''
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless wandb is installed.'''
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless comet_ml is installed.'''
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
# True when at least one of wandb/tensorboard is available while comet_ml is
# absent (comet_ml hijacks tracking when installed).
lowercase__ :Dict = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Decorator: skip unless a non-comet tracker (wandb/tensorboard) is usable.'''
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
class lowercase ( unittest.TestCase ):
    """TestCase whose tests share a single temporary directory.

    The directory is created once in `setUpClass`, optionally emptied before
    every test (when `clear_on_setup` is True) and removed in `tearDownClass`.
    """

    # When True, everything inside the shared tmpdir is deleted before each test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create the temporary directory shared by all tests of this class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove the shared temporary directory if it still exists."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Empty the shared tmpdir so each test starts from a clean slate."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('''**/*'''):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class lowercase ( unittest.TestCase ):
    """TestCase that resets the accelerate state singletons after each test."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowercase ( unittest.TestCase ):
    """TestCase helper that starts mock patchers and stops them automatically."""

    def A__ ( self ,A__):
        # Accept a single patcher or a tuple/list of them; keep them on the
        # instance, start each one, and register its `stop` with addCleanup so
        # the patch is undone even when the test fails.
        self.mocks = A__ if isinstance(A__ ,(tuple, list)) else [A__]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Return True iff the given tensor holds identical values on every process.

    The local tensor is gathered across distributed processes (one row per
    process) and each gathered row is compared with the local copy.
    '''
    state = AcceleratorState()
    # Add a leading batch dimension so `gather` stacks one row per process.
    tensor = lowerCAmelCase__[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    # Local reference row.
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class lowercase :
def __init__( self ,A__ ,A__ ,A__):
lowercase = returncode
lowercase = stdout
lowercase = stderr
async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
while True:
lowercase = await stream.readline()
if line:
callback(lowerCAmelCase__ )
else:
break
async def _stream_subprocess ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    '''Run `cmd` as an async subprocess, teeing stdout/stderr line by line.

    Returns a `_RunOutput` holding the return code and the captured (decoded,
    rstripped) stdout and stderr lines.
    '''
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        # Decode, strip the trailing newline, record, and (optionally) echo.
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def UpperCamelCase ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    '''Synchronously run `cmd` via `_stream_subprocess`.

    Raises RuntimeError (including the combined worker stderr) when the
    subprocess exits with a non-zero return code; returns the `_RunOutput`
    otherwise.
    '''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}' )
    return result
class SubprocessCallException ( Exception ):
    """Raised by `run_command` when the launched subprocess exits with an error."""
def UpperCamelCase ( command , return_stdout=False ):
    '''Run `command` (a list of argv strings) via subprocess.

    When `return_stdout` is True the captured stdout is returned, decoded to
    str when it arrives as bytes.  Raises SubprocessCallException with the
    captured output if the command fails.
    '''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 633 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester :
    """Builds tiny Blenderbot configs and inputs for the TF model tests."""

    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__(
        self ,
        parent ,
        batch_size=1_3 ,
        seq_length=7 ,
        is_training=True ,
        use_labels=False ,
        vocab_size=9_9 ,
        hidden_size=3_2 ,
        num_hidden_layers=2 ,
        num_attention_heads=4 ,
        intermediate_size=3_7 ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=2_0 ,
        eos_token_id=2 ,
        pad_token_id=1 ,
        bos_token_id=0 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a (config, inputs_dict) pair for a tiny random Blenderbot."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) ,1)
        input_ids = tf.concat([input_ids, eos_tensor] ,axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        inputs_dict = prepare_blenderbot_inputs_dict(config ,input_ids ,decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self ,config ,inputs_dict):
        """Check that cached (past_key_values) decoding matches full decoding."""
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        # Restrict to a single example.
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids ,attention_mask=attention_mask ,head_mask=head_mask ,use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) ,config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) ,2) ,tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] ,axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] ,axis=-1)
        output_from_no_past = model(next_input_ids ,attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens ,attention_mask=next_attention_mask ,past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,) ,output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice ,output_from_past_slice ,rtol=1E-3)
def prepare_blenderbot_inputs_dict (
    config ,
    input_ids ,
    decoder_input_ids ,
    attention_mask=None ,
    decoder_attention_mask=None ,
    head_mask=None ,
    decoder_head_mask=None ,
    cross_attn_head_mask=None ,
):
    '''Assemble the model-inputs dict, deriving default masks when not supplied.'''
    if attention_mask is None:
        # Attend to every non-padding token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # Always attend to the decoder start token, then to non-padding tokens.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class lowercase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common TF model / pipeline test-suite bindings for Blenderbot."""

    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFBlenderbotForConditionalGeneration,
            '''feature-extraction''': TFBlenderbotModel,
            '''summarization''': TFBlenderbotForConditionalGeneration,
            '''text2text-generation''': TFBlenderbotForConditionalGeneration,
            '''translation''': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self ,config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class lowercase ( unittest.TestCase ):
    """Slow integration test: generation with the 400M distilled checkpoint."""

    src_text = ['''My friends are cool but they eat too many carbs.''']
    model_name = '''facebook/blenderbot-400M-distill'''

    @cached_property
    def tokenizer(self):
        # Lazily load (and cache) the pretrained tokenizer.
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        # Lazily load (and cache) the pretrained model.
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text ,return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids ,)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 715 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( SeqaSeqTrainer ):
    """Seq2seq trainer specialised for question answering.

    Extends the generation-based trainer so that raw generations can be
    post-processed into answers (via `post_process_function`) against the
    original `eval_examples` before metrics are computed.
    """

    def __init__( self ,*args ,eval_examples=None ,post_process_function=None ,**kwargs):
        super().__init__(*args ,**kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate( self ,eval_dataset=None ,eval_examples=None ,ignore_keys=None ,metric_key_prefix="eval" ,**gen_kwargs):
        """Run generation-based evaluation and return the metrics dict."""
        gen_kwargs = gen_kwargs.copy()
        # Fall back to the generation defaults configured on the training args.
        gen_kwargs["max_length"] = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader ,description='''Evaluation''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=ignore_keys ,metric_key_prefix=metric_key_prefix ,)
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix ,start_time ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples ,eval_dataset ,output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,metrics)
        return metrics

    def predict( self ,predict_dataset ,predict_examples ,ignore_keys=None ,metric_key_prefix="test" ,**gen_kwargs):
        """Run generation-based prediction; return a PredictionOutput with metrics."""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader ,description='''Prediction''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=ignore_keys ,metric_key_prefix=metric_key_prefix ,)
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix ,start_time ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples ,predict_dataset ,output ,'''predict''')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=metrics)
| 633 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowercase__ :Optional[Any] = logging.getLogger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Summarization fine-tuning module (PyTorch Lightning wrapper around a
    Hugging Face seq2seq model).

    NOTE(review): identifiers in this file were machine-mangled: class
    attributes collapsed onto ``lowercase_`` (only the last assignment per
    name survives), method parameters collapsed onto ``A__`` (duplicate
    parameter names are a SyntaxError), and local/attribute bindings
    collapsed onto ``lowercase`` while the bodies still read the original
    names (``hparams``, ``batch``, ``outputs`` ...). Comments describe the
    evident intent; names must be restored for the module to actually run.
    """
    # Originally distinct attrs: mode, loss_names, metric_names, default_val_metric.
    lowercase_ : List[str] ="""summarization"""
    lowercase_ : str =["""loss"""]
    lowercase_ : str =ROUGE_KEYS
    lowercase_ : List[Any] ="""rouge2"""

    def __init__( self ,A__ ,**A__):
        # Sortish sampling and dynamic batching are mutually exclusive, and
        # neither supports multi-GPU runs.
        if hparams.sortish_sampler and hparams.gpus > 1:
            lowercase = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''')
            if hparams.sortish_sampler:
                raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''')
        super().__init__(A__ ,num_labels=A__ ,mode=self.mode ,**A__)
        use_task_specific_params(self.model ,'''summarization''')
        save_git_info(self.hparams.output_dir)
        # Persist run metadata (metrics + hyper-parameters) next to the outputs.
        lowercase = Path(self.output_dir) / '''metrics.json'''
        lowercase = Path(self.output_dir) / '''hparams.pkl'''
        pickle_save(self.hparams ,self.hparams_save_path)
        lowercase = 0
        lowercase = defaultdict(A__)
        lowercase = self.config.model_type
        # FSMT keeps separate source/target vocabularies.
        lowercase = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
        # Kwargs shared by the train/val/test dataset constructors.
        lowercase = {
            '''data_dir''': self.hparams.data_dir,
            '''max_source_length''': self.hparams.max_source_length,
            '''prefix''': self.model.config.prefix or '''''',
        }
        lowercase = {
            '''train''': self.hparams.n_train,
            '''val''': self.hparams.n_val,
            '''test''': self.hparams.n_test,
        }
        # -1 means "use the whole split".
        lowercase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        lowercase = {
            '''train''': self.hparams.max_target_length,
            '''val''': self.hparams.val_max_target_length,
            '''test''': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
        assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        lowercase = get_git_info()['''repo_sha''']
        lowercase = hparams.num_workers
        lowercase = None  # default to config
        # MBart must start decoding with the target-language code token.
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,A__):
            lowercase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            lowercase = self.decoder_start_token_id
        # Prefer the newer dataset class when the tokenizer supports it.
        lowercase = (
            SeqaSeqDataset if hasattr(self.tokenizer ,'''prepare_seq2seq_batch''') else LegacySeqaSeqDataset
        )
        lowercase = False
        lowercase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            lowercase = self.hparams.eval_max_gen_length
        else:
            lowercase = self.model.config.max_length
        lowercase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def A__ ( self ,A__):
        """Dump one batch (decoded text + raw token ids) to JSON for debugging."""
        lowercase = {
            k: self.tokenizer.batch_decode(v.tolist()) if '''mask''' not in k else v.shape for k, v in batch.items()
        }
        save_json(A__ ,Path(self.output_dir) / '''text_batch.json''')
        save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir) / '''tok_batch.json''')
        # Flag so the dump only happens once per run.
        lowercase = True
        return readable_batch

    def A__ ( self ,A__ ,**A__):
        """Forward pass: delegate directly to the wrapped model."""
        return self.model(A__ ,**A__)

    def A__ ( self ,A__):
        """Decode generated ids to whitespace-stripped strings."""
        lowercase = self.tokenizer.batch_decode(
            A__ ,skip_special_tokens=A__ ,clean_up_tokenization_spaces=A__)
        return lmap(str.strip ,A__)

    def A__ ( self ,A__):
        """Compute the (optionally label-smoothed) LM loss for one batch."""
        lowercase = self.tokenizer.pad_token_id
        lowercase , lowercase = batch['''input_ids'''], batch['''attention_mask''']
        lowercase = batch['''labels''']
        # T5 shifts labels internally; BART-style models use shift_tokens_right.
        if isinstance(self.model ,A__):
            lowercase = self.model._shift_right(A__)
        else:
            lowercase = shift_tokens_right(A__ ,A__)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            lowercase = decoder_input_ids
            self.save_readable_batch(A__)
        lowercase = self(A__ ,attention_mask=A__ ,decoder_input_ids=A__ ,use_cache=A__)
        lowercase = outputs['''logits''']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            lowercase = nn.CrossEntropyLoss(ignore_index=A__)
            assert lm_logits.shape[-1] == self.vocab_size
            lowercase = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1]) ,tgt_ids.view(-1))
        else:
            lowercase = nn.functional.log_softmax(A__ ,dim=-1)
            lowercase , lowercase = label_smoothed_nll_loss(
                A__ ,A__ ,self.hparams.label_smoothing ,ignore_index=A__)
        return (loss,)

    @property
    def A__ ( self):
        """Pad token id used to mask loss and count padding."""
        return self.tokenizer.pad_token_id

    def A__ ( self ,A__ ,A__):
        """Lightning training_step: loss plus padding/throughput diagnostics."""
        lowercase = self._step(A__)
        lowercase = dict(zip(self.loss_names ,A__))
        # tokens per batch
        lowercase = batch['''input_ids'''].ne(self.pad).sum() + batch['''labels'''].ne(self.pad).sum()
        lowercase = batch['''input_ids'''].shape[0]
        lowercase = batch['''input_ids'''].eq(self.pad).sum()
        lowercase = batch['''input_ids'''].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def A__ ( self ,A__ ,A__):
        """Lightning validation_step: generate and score one batch."""
        return self._generative_step(A__)

    def A__ ( self ,A__ ,A__="val"):
        """Aggregate per-step outputs into epoch-level metrics."""
        self.step_count += 1
        lowercase = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        lowercase = losses['''loss''']
        lowercase = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
        }
        # The monitored metric may live in either dict (generative or loss).
        lowercase = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        lowercase = torch.tensor(A__).type_as(A__)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(A__)
        lowercase = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
        lowercase = self.step_count
        self.metrics[prefix].append(A__)  # callback writes this to self.metrics_save_path
        lowercase = flatten_list([x['''preds'''] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f'{prefix}_loss': loss,
            f'{prefix}_{self.val_metric}': metric_tensor,
        }

    def A__ ( self ,A__ ,A__):
        """Score predictions against references with ROUGE."""
        return calculate_rouge(A__ ,A__)

    def A__ ( self ,A__):
        """Generate summaries for one batch and compute loss + gen metrics."""
        lowercase = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        lowercase = self.model.generate(
            batch['''input_ids'''] ,attention_mask=batch['''attention_mask'''] ,use_cache=A__ ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,)
        # Per-example wall-clock generation time.
        lowercase = (time.time() - ta) / batch['''input_ids'''].shape[0]
        lowercase = self.ids_to_clean_text(A__)
        lowercase = self.ids_to_clean_text(batch['''labels'''])
        lowercase = self._step(A__)
        lowercase = dict(zip(self.loss_names ,A__))
        lowercase = self.calc_generative_metrics(A__ ,A__)
        lowercase = np.mean(lmap(A__ ,A__))
        base_metrics.update(gen_time=A__ ,gen_len=A__ ,preds=A__ ,target=A__ ,**A__)
        return base_metrics

    def A__ ( self ,A__ ,A__):
        """Lightning test_step: same generation path as validation."""
        return self._generative_step(A__)

    def A__ ( self ,A__):
        """Aggregate test outputs with the 'test' metric prefix."""
        return self.validation_epoch_end(A__ ,prefix='''test''')

    def A__ ( self ,A__):
        """Instantiate the dataset for a split ('train'/'val'/'test')."""
        lowercase = self.n_obs[type_path]
        lowercase = self.target_lens[type_path]
        lowercase = self.dataset_class(
            self.tokenizer ,type_path=A__ ,n_obs=A__ ,max_target_length=A__ ,**self.dataset_kwargs ,)
        return dataset

    def A__ ( self ,A__ ,A__ ,A__ = False):
        """Build a DataLoader; training may use sortish or dynamic sampling."""
        lowercase = self.get_dataset(A__)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            lowercase = dataset.make_sortish_sampler(A__ ,distributed=self.hparams.gpus > 1)
            return DataLoader(
                A__ ,batch_size=A__ ,collate_fn=dataset.collate_fn ,shuffle=A__ ,num_workers=self.num_workers ,sampler=A__ ,)
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            lowercase = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1)
            return DataLoader(
                A__ ,batch_sampler=A__ ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,)
        else:
            return DataLoader(
                A__ ,batch_size=A__ ,collate_fn=dataset.collate_fn ,shuffle=A__ ,num_workers=self.num_workers ,sampler=A__ ,)

    def A__ ( self):
        """DataLoader for training (shuffled)."""
        lowercase = self.get_dataloader('''train''' ,batch_size=self.hparams.train_batch_size ,shuffle=A__)
        return dataloader

    def A__ ( self):
        """DataLoader for validation."""
        return self.get_dataloader('''val''' ,batch_size=self.hparams.eval_batch_size)

    def A__ ( self):
        """DataLoader for test."""
        return self.get_dataloader('''test''' ,batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def A__ ( A__ ,A__):
        """Add model/data CLI options on top of the generic trainer args."""
        BaseTransformer.add_model_specific_args(A__ ,A__)
        add_generic_args(A__ ,A__)
        parser.add_argument(
            '''--max_source_length''' ,default=1_0_2_4 ,type=A__ ,help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) ,)
        parser.add_argument(
            '''--max_target_length''' ,default=5_6 ,type=A__ ,help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) ,)
        parser.add_argument(
            '''--val_max_target_length''' ,default=1_4_2 ,type=A__ ,help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) ,)
        parser.add_argument(
            '''--test_max_target_length''' ,default=1_4_2 ,type=A__ ,help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) ,)
        parser.add_argument('''--freeze_encoder''' ,action='''store_true''')
        parser.add_argument('''--freeze_embeds''' ,action='''store_true''')
        parser.add_argument('''--sortish_sampler''' ,action='''store_true''' ,default=A__)
        parser.add_argument('''--overwrite_output_dir''' ,action='''store_true''' ,default=A__)
        parser.add_argument('''--max_tokens_per_batch''' ,type=A__ ,default=A__)
        parser.add_argument('''--logger_name''' ,type=A__ ,choices=['''default''', '''wandb''', '''wandb_shared'''] ,default='''default''')
        parser.add_argument('''--n_train''' ,type=A__ ,default=-1 ,required=A__ ,help='''# examples. -1 means use all.''')
        parser.add_argument('''--n_val''' ,type=A__ ,default=5_0_0 ,required=A__ ,help='''# examples. -1 means use all.''')
        parser.add_argument('''--n_test''' ,type=A__ ,default=-1 ,required=A__ ,help='''# examples. -1 means use all.''')
        parser.add_argument(
            '''--task''' ,type=A__ ,default='''summarization''' ,required=A__ ,help='''# examples. -1 means use all.''')
        parser.add_argument('''--label_smoothing''' ,type=A__ ,default=0.0 ,required=A__)
        parser.add_argument('''--src_lang''' ,type=A__ ,default='''''' ,required=A__)
        parser.add_argument('''--tgt_lang''' ,type=A__ ,default='''''' ,required=A__)
        parser.add_argument('''--eval_beams''' ,type=A__ ,default=A__ ,required=A__)
        parser.add_argument(
            '''--val_metric''' ,type=A__ ,default=A__ ,required=A__ ,choices=['''bleu''', '''rouge2''', '''loss''', None])
        parser.add_argument('''--eval_max_gen_length''' ,type=A__ ,default=A__ ,help='''never generate more than n tokens''')
        parser.add_argument('''--save_top_k''' ,type=A__ ,default=1 ,required=A__ ,help='''How many checkpoints to save''')
        parser.add_argument(
            '''--early_stopping_patience''' ,type=A__ ,default=-1 ,required=A__ ,help=(
                '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
                ''' val_check_interval will effect it.'''
            ) ,)
        return parser
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Translation fine-tuning variant: same training loop as the
    summarization module above, but scored with BLEU.

    NOTE(review): obfuscation collapsed four distinct class attributes
    (originally mode, loss_names, metric_names, default_val_metric) onto one
    name, so only the last assignment survives; the __init__ signature also
    reuses ``A__`` (a SyntaxError) and its locals no longer match the names
    intended (originally stored on ``self`` from ``hparams``).
    """
    lowercase_ : Tuple ="""translation"""
    lowercase_ : Tuple =["""loss"""]
    lowercase_ : Tuple =["""bleu"""]
    lowercase_ : Optional[int] ="""bleu"""

    def __init__( self ,A__ ,**A__):
        # Delegate common setup to the base module, then record the language pair.
        super().__init__(A__ ,**A__)
        # NOTE(review): both assignments target the same mangled local;
        # originally these stored hparams.src_lang / hparams.tgt_lang.
        lowercase = hparams.src_lang
        lowercase = hparams.tgt_lang

    def A__ ( self ,A__ ,A__):
        """Score predictions against references with BLEU."""
        return calculate_bleu(A__ ,A__)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ):
    """Train (and optionally test) a summarization/translation module.

    NOTE(review): mangled identifiers — both parameters share one name (a
    SyntaxError; originally ``(args, model=None)``), and the results bound to
    ``lowercase`` are read below under their original names (``model``,
    ``dataset``, ``logger``, ``es_callback``, ``trainer``, ``checkpoints``).
    """
    Path(args.output_dir ).mkdir(exist_ok=_UpperCamelCase )
    check_output_dir(_UpperCamelCase , expected_items=3 )
    # Build the Lightning module for the requested task unless one was injected.
    if model is None:
        if "summarization" in args.task:
            lowercase = SummarizationModule(_UpperCamelCase )
        else:
            lowercase = TranslationModule(_UpperCamelCase )
    lowercase = Path(args.data_dir ).name
    # Only log to wandb for "real" runs (not tmp dirs / fast dev runs).
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('''/tmp''' )
        or str(args.output_dir ).startswith('''/var''' )
    ):
        lowercase = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        lowercase = os.environ.get('''WANDB_PROJECT''' , _UpperCamelCase )
        lowercase = WandbLogger(name=model.output_dir.name , project=_UpperCamelCase )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        lowercase = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
    if args.early_stopping_patience >= 0:
        lowercase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        lowercase = False
    # Checkpoint selection minimizes when monitoring loss, maximizes otherwise.
    lowercase = args.val_metric == '''loss'''
    lowercase = generic_train(
        _UpperCamelCase , _UpperCamelCase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , _UpperCamelCase ) , early_stopping_callback=_UpperCamelCase , logger=_UpperCamelCase , )
    pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' )
    if not args.do_predict:
        return model
    lowercase = ''''''
    # Pick the newest checkpoint for test-time evaluation.
    lowercase = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=_UpperCamelCase ) )
    if checkpoints:
        # NOTE(review): two identical mangled assignments — originally these
        # set distinct targets (the model's test checkpoint and the trainer's
        # resume path); confirm against the pre-obfuscation script.
        lowercase = checkpoints[-1]
        lowercase = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # Build the CLI: generic Lightning args + model-specific args, then train.
    # NOTE(review): mangled bindings — `parser`, `args`, and `main` are read
    # below but never bound under those names in this file.
    lowercase__ :str = argparse.ArgumentParser()
    lowercase__ :Dict = pl.Trainer.add_argparse_args(parser)
    lowercase__ :Optional[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    lowercase__ :int = parser.parse_args()
    main(args)
| 716 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( lowerCAmelCase__=None ):
    """Build the `accelerate tpu-config` argument parser.

    Fixes (mangled bindings restored from the names the body reads):
      * ``parser``, ``config_args`` and ``pod_args`` were read but every
        assignment targeted a throwaway local, so the original raised
        NameError on first use;
      * ``type=`` / ``default=`` for several options were replaced by the
        function parameter itself; restored to ``str`` / ``None``.

    Args:
        lowerCAmelCase__: optional argparse subparsers action. When given,
            the command is registered as the ``tpu-config`` subcommand;
            otherwise a standalone parser is returned.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    subparsers = lowerCAmelCase__  # the body below was written against this name
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        # NOTE(review): upstream wires func=tpu_command_launcher here; that
        # def was renamed by the obfuscation, so the original (broken) wiring
        # is kept — restore the handler name upstream.
        parser.set_defaults(func=lowerCAmelCase__ )
    return parser
def UpperCamelCase ( lowerCAmelCase__ ):
    """Run the configured startup commands on a TPU pod via ``gcloud ... ssh``.

    Fixes (mangled bindings restored from the names the body reads and from
    the module imports):
      * every throwaway-local assignment was restored to the ``args``
        attribute or named local (``defaults``, ``new_cmd``, ``cmd``) that
        later statements read;
      * ``os.path.isfile`` was called on the args namespace (TypeError at
        runtime) instead of ``default_config_file`` imported at the top of
        this file;
      * the version check compared against the args namespace instead of
        ``Version`` imported at the top of this file;
      * config-file defaults are only applied when a config was loaded, so
        ``defaults`` is never dereferenced while ``None``.

    Args:
        lowerCAmelCase__: parsed argparse namespace from the parser above.
    """
    args = lowerCAmelCase__  # the body below was written against this name
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    # Resolve the accelerate version spec to a pip-installable target.
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'Running {" ".join(cmd )}' )
        return
    subprocess.run(cmd )
    print('''Successfully setup pod.''' )
def UpperCamelCase ( ):
    """Standalone entry point: parse CLI args and launch the TPU setup."""
    # NOTE(review): mangled bindings — `tpu_command_parser` and
    # `tpu_command_launcher` were both renamed to UpperCamelCase above, and
    # `parser` / the parsed args are read from names never bound here, so
    # this entry point cannot run as written.
    lowercase = tpu_command_parser()
    lowercase = parser.parse_args()
    tpu_command_launcher(lowerCAmelCase__ )
| 633 | 0 |
from scipy.stats import spearmanr
import datasets
lowercase__ :Tuple = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase__ :List[str] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase__ :int = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """Spearman rank-order correlation metric backed by scipy.stats.spearmanr."""

    def A__ ( self):
        """Declare the metric's metadata and input schema for `datasets`."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float'''),
                    '''references''': datasets.Value('''float'''),
                }) ,reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] ,)

    def A__ ( self ,predictions ,references ,return_pvalue=False):
        """Compute the Spearman correlation (and optionally its p-value).

        Fix: the mangled signature reused one parameter name (a SyntaxError)
        and the body read ``return_pvalue`` / ``results`` that were never
        bound; parameter names are restored from those reads.
        """
        results = spearmanr(predictions ,references)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        return {"spearmanr": results[0]}
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Restored names: the functions below read NON_ALPHA, MIN_NUM_TOKENS and
# NUM_PERM, but the mangled assignments all bound `lowercase__`.
# Pattern matching any character that is NOT part of an identifier; used to
# split source code into identifier-like tokens.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def UpperCamelCase ( lowerCAmelCase__ ):
    """Compute a MinHash signature over a collection of tokens.

    Returns None when there are fewer than MIN_NUM_TOKENS tokens — too little
    signal for reliable near-duplicate detection.

    Fixes: the mangled source passed the token list itself as ``num_perm``
    (instead of the NUM_PERM constant) and bound the hash to a throwaway
    local while the following lines read ``min_hash``.
    """
    if len(lowerCAmelCase__ ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(lowerCAmelCase__ ):
        min_hash.update(token.encode() )
    return min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return the set of identifier-like tokens appearing in a code string."""
    fragments = NON_ALPHA.split(lowerCAmelCase__ )
    # Drop fragments that are empty or whitespace-only.
    return {fragment for fragment in fragments if fragment.strip()}
class lowercase :
    """Incremental MinHash-LSH index that groups near-duplicate code files.

    NOTE(review): mangled bindings — __init__ assigns every value to a
    throwaway local ``lowercase`` while the methods read ``self._index`` /
    ``self._duplicate_clusters`` / ``self._duplication_jaccard_threshold``,
    and method locals likewise shadow the names later read
    (``close_duplicates``, ``cluster`` ...). Restore the attribute/local
    names for the class to function.
    """

    def __init__( self ,*,
    A__ = 0.85 ,):
        # Jaccard threshold above which two files count as near-duplicates.
        lowercase = duplication_jaccard_threshold
        lowercase = NUM_PERM
        lowercase = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm)
        # Maps a cluster's base key -> set of duplicate keys.
        lowercase = defaultdict(A__)

    def A__ ( self ,A__ ,A__):
        """Insert (code_key, min_hash); attach to an existing cluster if close."""
        lowercase = self._index.query(A__)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(A__ ,A__)
        if len(A__) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(A__)
                    break
            else:
                # for/else: no existing cluster matched — seed a new cluster
                # keyed by the first close duplicate.
                self._duplicate_clusters[close_duplicates[0]].add(A__)

    def A__ ( self):
        """Return clusters as lists of {base_index, repo_name, path} dicts."""
        lowercase = []
        for base, duplicates in self._duplicate_clusters.items():
            lowercase = [base] + list(A__)
            # reformat the cluster to be a list of dict
            lowercase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(A__)
        return duplicate_clusters

    def A__ ( self ,A__):
        """Serialize the duplicate clusters to a JSON file."""
        lowercase = self.get_duplicate_clusters()
        with open(A__ ,'''w''') as f:
            json.dump(A__ ,A__)
def UpperCamelCase ( lowerCAmelCase__ ):
    """Map an (index, row) pair to ((index, repo_name, path), MinHash) or None.

    Fix: the mangled tuple-unpacking bound both elements to one throwaway
    name while the body read ``index`` and ``data``; names restored from
    those reads.

    NOTE(review): ``get_min_hash`` refers to the helper defined earlier in
    this file, whose def was also renamed by the obfuscation — restore
    upstream.
    """
    index , data = lowerCAmelCase__
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    """Yield ((index, repo, path), MinHash) pairs computed in a process pool.

    NOTE(review): `_compute_min_hash` is read here but the corresponding def
    was renamed by the obfuscation, so this generator raises NameError as
    written.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(lowerCAmelCase__ , max_queue_size=1_0000 ) , chunksize=100 , ):
            # Rows with too few tokens yield None and are skipped.
            if data is not None:
                yield data
def UpperCamelCase ( dataset_iterator , jaccard_threshold ):
    """Build duplicate clusters for a dataset of code files.

    Fixes: the mangled signature reused one parameter name (duplicate
    parameter names are a SyntaxError) and the index was bound to a
    throwaway local while ``di`` was read below. Parameter names are
    restored from the keyword usage (``duplication_jaccard_threshold=...``)
    and the enumerate call.

    NOTE(review): ``minhash_iter`` refers to the generator defined above,
    whose def was also renamed by the obfuscation — restore upstream.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def UpperCamelCase ( code_a , code_b ):
    """Jaccard similarity between the identifier-token sets of two code strings.

    Fixes: the mangled signature reused one parameter name (a SyntaxError)
    and both token sets collapsed onto one local while the return read two
    names. Also guards the empty-union case (two snippets with no
    identifier-like tokens are treated as identical) instead of raising
    ZeroDivisionError.
    """
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    union = tokens_a | tokens_b
    if not union:
        # Neither snippet has any token: define them as maximally similar.
        return 1.0
    return len(tokens_a & tokens_b ) / len(union )
lowercase__ :List[Any] = None
def UpperCamelCase ( cluster , jaccard_threshold ):
    """Reduce one duplicate cluster to its 'extremes'.

    An element is kept in ``extremes`` only if it is not a near-duplicate
    (Jaccard >= threshold) of an element already kept; otherwise the kept
    element's ``copies`` counter is incremented.

    Fixes: the mangled signature reused one parameter name (a SyntaxError)
    and the two loop elements collapsed onto a single identifier; names are
    restored from the surviving reads (``cluster``, ``extremes``,
    ``jaccard_threshold``).

    NOTE(review): reads the module-global ``_shared_dataset`` and calls
    ``jaccard_similarity``; both bindings were also renamed by the
    obfuscation and must be restored for this to run.
    """
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a['''base_index''']]['''content''']
        for element_b in extremes:
            code_b = _shared_dataset[element_b['''base_index''']]['''content''']
            if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            # for/else: no kept element was similar — this one is an extreme.
            element_a["copies"] = 1
            extremes.append(element_a )
    return extremes
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    """For each duplicate cluster, find its 'extremes' using a process pool.

    NOTE(review): mangled identifiers — three parameters share one name (a
    SyntaxError; originally (cluster_list, dataset, jaccard_threshold)), and
    the locals bound to ``lowercase`` are read below as ``extremes_list`` and
    the partial function. The dataset is published to workers through the
    module-global ``_shared_dataset`` to avoid pickling it per task.
    """
    global _shared_dataset
    lowercase = dataset
    lowercase = []
    lowercase = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase__ )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                lowerCAmelCase__ , lowerCAmelCase__ , ) , total=len(lowerCAmelCase__ ) , ):
            extremes_list.append(lowerCAmelCase__ )
    return extremes_list
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 0.85 ):
    """Drop near-duplicate files from a dataset, keeping cluster 'extremes'.

    NOTE(review): mangled identifiers throughout — the two parameters share
    one name (a SyntaxError; originally (dataset, jaccard_threshold)), every
    result is bound to ``lowercase`` while later statements read the original
    names (``duplicate_clusters``, ``duplicate_indices``, ``extreme_dict``,
    ``remove_indices``, ``ds_filter``), the filter lambda reuses one
    parameter name, and the summary prints all measure the same mangled
    argument. Restore from the pre-obfuscation dedup script.
    """
    lowercase = make_duplicate_clusters(lowerCAmelCase__ , lowerCAmelCase__ )
    lowercase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    lowercase = {}
    lowercase = find_extremes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    for extremes in extremes_clusters:
        for element in extremes:
            lowercase = element
    # Remove every clustered row that is not one of the kept extremes.
    lowercase = duplicate_indices - set(extreme_dict.keys() )
    lowercase = dataset.filter(lambda lowerCAmelCase__ , lowerCAmelCase__ : idx not in remove_indices , with_indices=lowerCAmelCase__ )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            lowercase = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                lowercase = extreme_dict[element['''base_index''']]['''copies''']
    print(f'Original dataset size: {len(lowerCAmelCase__ )}' )
    print(f'Number of duplicate clusters: {len(lowerCAmelCase__ )}' )
    print(f'Files in duplicate cluster: {len(lowerCAmelCase__ )}' )
    print(f'Unique files in duplicate cluster: {len(lowerCAmelCase__ )}' )
    print(f'Filtered dataset size: {len(lowerCAmelCase__ )}' )
    return ds_filter, duplicate_clusters
| 633 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ :Tuple = logging.get_logger(__name__)
# Restored names: the tokenizer class below reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, but
# the mangled assignments all bound `lowercase__`. Values are unchanged.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

# Max model input sizes, keyed by checkpoint name.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

# SentencePiece's word-boundary marker character.
# NOTE(review): no read of this constant is visible in this chunk; the name
# follows the conventional SPIECE_UNDERLINE used by sentencepiece tokenizers —
# confirm against the rest of the file.
SPIECE_UNDERLINE = "▁"
class lowercase ( __lowerCAmelCase ):
    """SentencePiece-BPE tokenizer (BARThez-style, per the module-level vocab constants).

    NOTE(review): identifiers in this block look machine-mangled — every local is
    `lowercase`, every argument is `A__`/`lowerCamelCase__`, and several `def`s
    repeat a parameter name (a SyntaxError as written). Comments below describe
    the apparent intent only; confirm against the unmangled history.
    """

    # Class-level tokenizer configuration, wired to the module constants above.
    lowercase_ : Tuple =VOCAB_FILES_NAMES
    lowercase_ : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
    lowercase_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ : Union[str, Any] =["input_ids", "attention_mask"]

    # Loads the SentencePiece model and fixes the four fairseq special-token ids.
    def __init__( self ,A__ ,A__="<s>" ,A__="</s>" ,A__="</s>" ,A__="<s>" ,A__="<unk>" ,A__="<pad>" ,A__="<mask>" ,A__ = None ,**A__ ,):
        # presumably wraps the mask token as an lstrip AddedToken — TODO confirm
        lowercase = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__) if isinstance(lowerCamelCase__ ,lowerCamelCase__) else mask_token
        lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowerCamelCase__ ,)
        lowercase = vocab_file
        lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(lowerCamelCase__))
        # fairseq reserves ids 0-3 for <s>/<pad>/</s>/<unk>
        lowercase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        lowercase = len(self.sp_model) - 1
        lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    # Builds `<s> A </s>` (single) or `<s> A </s></s> B </s>` (pair) id lists.
    def A__ ( self ,A__ ,A__ = None):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase = [self.cls_token_id]
        lowercase = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    # Special-tokens mask: 1 marks a special token, 0 a sequence token.
    def A__ ( self ,A__ ,A__ = None ,A__ = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__)
        if token_ids_a is None:
            return [1] + ([0] * len(lowerCamelCase__)) + [1]
        return [1] + ([0] * len(lowerCamelCase__)) + [1, 1] + ([0] * len(lowerCamelCase__)) + [1]

    # Token-type ids: this model uses all-zero segment ids for both sequences.
    def A__ ( self ,A__ ,A__ = None):
        lowercase = [self.sep_token_id]
        lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    # Vocabulary size = size of the underlying SentencePiece model.
    @property
    def A__ ( self):
        return len(self.sp_model)

    # token -> id mapping, including tokens added after training.
    def A__ ( self):
        lowercase = {self.convert_ids_to_tokens(lowerCamelCase__): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    # Tokenization is delegated entirely to SentencePiece.
    def A__ ( self ,A__):
        return self.sp_model.encode(lowerCamelCase__ ,out_type=lowerCamelCase__)

    # token -> id: fairseq specials first, then SentencePiece; id 0 means unknown.
    def A__ ( self ,A__):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        lowercase = self.sp_model.PieceToId(lowerCamelCase__)
        return spm_id if spm_id else self.unk_token_id

    # id -> token: inverse of the above.
    def A__ ( self ,A__):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(lowerCamelCase__)

    # Decode a token list to a string, keeping special tokens verbatim.
    def A__ ( self ,A__):
        lowercase = []
        lowercase = ''''''
        lowercase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowerCamelCase__) + token
                lowercase = True
                lowercase = []
            else:
                current_sub_tokens.append(lowerCamelCase__)
                lowercase = False
        out_string += self.sp_model.decode(lowerCamelCase__)
        return out_string.strip()

    # Pickle support: the SentencePiece processor object is not picklable, so it
    # is dropped on __getstate__ ...
    def __getstate__( self):
        lowercase = self.__dict__.copy()
        lowercase = None
        return state

    # ... and rebuilt from `vocab_file` on __setstate__.
    def __setstate__( self ,A__):
        lowercase = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs'''):
            lowercase = {}
        lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    # Save the SentencePiece model into `save_directory` (copy the original file,
    # or serialize the in-memory model when the original is gone).
    def A__ ( self ,A__ ,A__ = None):
        if not os.path.isdir(lowerCamelCase__):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        lowercase = os.path.join(
            lowerCamelCase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase__) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file ,lowerCamelCase__)
        elif not os.path.isfile(self.vocab_file):
            with open(lowerCamelCase__ ,'''wb''') as fi:
                lowercase = self.sp_model.serialized_model_proto()
                fi.write(lowerCamelCase__)
        return (out_vocab_file,)
| 718 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """CLIP-based image safety checker: scores images for NSFW and watermark
    content and replaces flagged images with black images.

    NOTE(review): locals in this block are mangled to `lowercase`, so the
    intermediate results (embeddings, per-image scores) are overwritten as
    written; comments describe the apparent intent only.
    """

    lowercase_ : Union[str, Any] =CLIPConfig
    lowercase_ : str =['''CLIPEncoderLayer''']

    # Builds the CLIP vision tower plus two scalar heads (NSFW, watermark).
    def __init__( self ,A__):
        super().__init__(A__)
        lowercase = CLIPVisionModelWithProjection(config.vision_config)
        lowercase = nn.Linear(config.vision_config.projection_dim ,1)
        lowercase = nn.Linear(config.vision_config.projection_dim ,1)

    # Scores a batch of images and blacks out the flagged ones; returns
    # (images, nsfw flags, watermark flags). Both thresholds default to 0.5.
    @torch.no_grad()
    def A__ ( self ,A__ ,A__ ,A__=0.5 ,A__=0.5):
        lowercase = self.vision_model(A__)[0]
        lowercase = self.p_head(A__)
        lowercase = nsfw_detected.flatten()
        lowercase = nsfw_detected > p_threshold
        lowercase = nsfw_detected.tolist()
        if any(A__):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        # replace each flagged image with an all-zero array of the same shape
        for idx, nsfw_detected_ in enumerate(A__):
            if nsfw_detected_:
                lowercase = np.zeros(images[idx].shape)
        lowercase = self.w_head(A__)
        lowercase = watermark_detected.flatten()
        lowercase = watermark_detected > w_threshold
        lowercase = watermark_detected.tolist()
        if any(A__):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, watermark_detected_ in enumerate(A__):
            if watermark_detected_:
                lowercase = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
| 633 | 0 |
'''simple docstring'''
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return the Catalan numbers C(0)..C(n) for n = lowerCAmelCase__ via dynamic programming.

    Raises:
        ValueError: if the upper limit is negative.
    """
    # Fixes: the original read undefined names (`upper_limit`, `snake_case_`)
    # and never kept the result list under one name.
    if lowerCAmelCase__ < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
    catalan_list = [0] * (lowerCAmelCase__ + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if lowerCAmelCase__ > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)) for j in 0..i-1
    for i in range(2 , lowerCAmelCase__ + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
    # Interactive driver: repeatedly read an upper limit and print the sequence.
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            # NOTE(review): the parsed input is bound to `lowercase__` but read
            # back as `N`, and the call site uses `catalan_numbers` while the
            # def above is mangled to `UpperCamelCase` — names look garbled;
            # confirm the original used a single consistent name.
            lowercase__ :Any = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(F'The Catalan numbers from 0 through {N} are:')
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
| 719 |
class lowercase :
    """A binary-search-tree node holding a comparable value.

    Fixes over the mangled original: `__init__` bound locals instead of the
    `val`/`left`/`right` attributes the other methods read, the insert method
    was named `A__` while being *called* as `insert` (both internally and by
    `tree_sort` below), and children were built via the undefined name `Node`.
    The obfuscated class name `lowercase` is kept so existing callers work.
    """

    def __init__( self ,A__):
        # payload and (initially empty) subtrees
        self.val = A__
        self.left = None
        self.right = None

    def insert( self ,A__):
        """Insert value A__ into the subtree rooted at this node.

        Duplicates overwrite the current node's value (a no-op in practice).
        """
        if self.val:
            if A__ < self.val:
                if self.left is None:
                    self.left = lowercase(A__)
                else:
                    self.left.insert(A__)
            elif A__ > self.val:
                if self.right is None:
                    self.right = lowercase(A__)
                else:
                    self.right.insert(A__)
        else:
            # falsy root value (e.g. 0): just take the new value
            self.val = A__
def UpperCamelCase ( root , res ):
    """Append the values of the BST rooted at `root` to `res` in sorted (in-)order.

    Fixes over the mangled original: both parameters shared one name (a
    SyntaxError), and the recursive calls used the undefined name `inorder`;
    the function now recurses through its own (kept) name.
    """
    # Recursive traversal: left subtree, node, right subtree.
    if root:
        UpperCamelCase(root.left , res )
        res.append(root.val )
        UpperCamelCase(root.right , res )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Sort a sequence by inserting it into a BST of `lowercase` nodes and
    reading the tree back in order; returns a new sorted list (the empty input
    is returned as-is).

    Fixes over the mangled original: `arr`/`root`/`res` were undefined, the
    node class was referenced via the undefined name `Node`, and the traversal
    call used `inorder`, which (sharing the mangled name `UpperCamelCase`) is
    shadowed by this very function — so the traversal is inlined as a local
    helper instead.
    """
    # Build BST
    if len(lowerCAmelCase__ ) == 0:
        return lowerCAmelCase__
    root = lowercase(lowerCAmelCase__[0] )
    for i in range(1 , len(lowerCAmelCase__ ) ):
        root.insert(lowerCAmelCase__[i] )
    # Traverse BST in order.
    res = []

    def _inorder(node):
        # left subtree, node value, right subtree
        if node:
            _inorder(node.left )
            res.append(node.val )
            _inorder(node.right )

    _inorder(root )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 633 | 0 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
lowercase__ :Any = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase__ :Optional[Any] = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=3_0522, type=int)
lowercase__ :int = parser.parse_args()
logger.info(F'Loading data from {args.data_file}')
with open(args.data_file, "rb") as fp:
lowercase__ :Union[str, Any] = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
lowercase__ :Dict = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowercase__ :str = [0] * args.vocab_size
for k, v in counter.items():
lowercase__ :List[Any] = v
logger.info(F'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 720 |
import os
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
    """Minimal left-to-right path sum through a comma-separated integer matrix
    (moves allowed: right, up, down) — Project-Euler-82 style.

    The file is resolved relative to its own directory component (an absolute
    path is used as-is). Fixes over the mangled original: each cell was parsed
    with `int(lowerCAmelCase__)` (the *filename*) instead of `int(element)`,
    and every computed value was bound to `lowercase` and then read back under
    an undefined name.
    """
    with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(''',''' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    # first column: a path must start in it, so its cost is the cell itself
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        # cost of stepping right from column j-1 ...
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ... then relax downward moves ...
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        # ... and upward moves.
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    # best value in the last column is the answer
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( torch_layer , weight , bias=None ):
    """Copy `weight` (and optionally `bias`) into one torch layer's parameters.

    Shapes are asserted to match before assignment. Fixes over the mangled
    original: all three parameters shared one name (a SyntaxError), and the new
    ``nn.Parameter`` objects were bound to a local instead of the layer's
    ``weight``/``bias`` attributes.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Copy trax LSH self-attention weights (query_key, value, output dense)
    into the given torch attention layer.

    NOTE(review): the three parameters share one mangled name (a SyntaxError as
    written) — presumably (weights, torch_layer, hidden_size); every result is
    bound to `lowercase`. Confirm against the unmangled history.
    '''
    # set torch weights for 1-to-1 comparison
    lowercase = np.asarray(weights[0] )
    lowercase = np.asarray(weights[1] )
    lowercase = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Copy trax local self-attention weights (query, key, value, output dense)
    into the given torch attention layer.

    NOTE(review): the three parameters share one mangled name (a SyntaxError as
    written) — presumably (weights, torch_layer, hidden_size); every result is
    bound to `lowercase`. Confirm against the unmangled history.
    '''
    # set torch weights for 1-to-1 comparison
    lowercase = np.asarray(weights[0] )
    lowercase = np.asarray(weights[1] )
    lowercase = np.asarray(weights[2] )
    lowercase = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Copy one trax Reformer block (attention layer-norm, LSH or local
    attention, feed-forward layer-norms and dense layers) into a torch block.

    NOTE(review): the three parameters share one mangled name (a SyntaxError as
    written) — presumably (weights, torch_block, hidden_size); locals are all
    `lowercase`. Confirm against the unmangled history.
    '''
    # layernorm 1
    lowercase = weights[0][0][0]
    lowercase = np.asarray(layer_norm_a[0] )
    lowercase = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
    # lsh weights + output
    lowercase = weights[0][1]
    # fewer than 4 weight arrays appears to mean LSH attention, else local
    if len(lowerCamelCase__ ) < 4:
        set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
    else:
        set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
    # intermediate weighs
    lowercase = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(lowerCamelCase__ ) == 4:
        lowercase = intermediate_weights[2]
    # layernorm 2
    lowercase = np.asarray(intermediate_weights[0][0] )
    lowercase = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
    # intermediate dense
    lowercase = np.asarray(intermediate_weights[1][0] )
    lowercase = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
    # intermediate out
    lowercase = np.asarray(intermediate_weights[4][0] )
    lowercase = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Copy all trax Reformer weights (word/position embeddings, every encoder
    block, final layer norm, LM-head decoder) into a torch
    ReformerModelWithLMHead.

    NOTE(review): the three parameters share one mangled name (a SyntaxError as
    written) — presumably (weights, torch_model, hidden_size); locals are all
    `lowercase`. Confirm against the unmangled history.
    '''
    # reformer model
    lowercase = torch_model.reformer
    # word embeds
    lowercase = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , )
    if isinstance(weights[3] , lowerCamelCase__ ):
        lowercase = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            lowercase = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings[emb_idx]} emb does not match'
            lowercase = nn.Parameter(torch.tensor(lowerCamelCase__ ) )
    lowercase = weights[5]
    # trax stores 4 weight groups per encoder layer
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        lowerCamelCase__ ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        lowercase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    # output layer norm
    lowercase = np.asarray(weights[7][0] )
    lowercase = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
    # output embeddings
    lowercase = np.asarray(weights[9][0] )
    lowercase = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Convert a pickled trax Reformer checkpoint into a PyTorch state dict.

    NOTE(review): the three parameters share one mangled name (a SyntaxError as
    written) — presumably (trax_model_pkl_path, config_file, pytorch_dump_path).
    The pickle is expected to be a trusted local checkpoint; `pickle.load` must
    never be pointed at untrusted input.
    '''
    # Initialise PyTorch model
    lowercase = ReformerConfig.from_json_file(lowerCamelCase__ )
    print(f'Building PyTorch model from configuration: {config}' )
    lowercase = ReformerModelWithLMHead(lowerCamelCase__ )
    with open(lowerCamelCase__ , '''rb''' ) as f:
        lowercase = pickle.load(lowerCamelCase__ )['''weights''']
    set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , lowerCamelCase__ )


if __name__ == "__main__":
    # NOTE(review): the parser/args are bound to `lowercase__` but read back as
    # `parser`/`args` — names look garbled.
    lowercase__ :Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    lowercase__ :Optional[int] = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 721 |
from __future__ import annotations
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return True iff the given side lengths can form a (non-degenerate)
    polygon, i.e. the longest side is strictly shorter than the sum of the rest.

    Raises:
        ValueError: for fewer than 3 sides or any non-positive side length.

    Fixes over the mangled original: the sides were read via the undefined name
    `nums`, and the sorted copy was bound to a throwaway local.
    """
    if len(lowerCAmelCase__ ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in lowerCAmelCase__ ):
        raise ValueError('''All values must be greater than 0''' )
    # sort a copy so the caller's list is left untouched
    copy_nums = sorted(lowerCAmelCase__ )
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ :str = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Dict = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
lowercase__ :Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
def UpperCamelCase ( input_a , input_b ):
    """Logical AND gate: return 1 iff both inputs are non-zero, else 0.

    Fixes over the mangled original: the two parameters shared one name (a
    SyntaxError) and the body read the first input twice, ignoring the second.
    """
    return int((input_a, input_b).count(0 ) == 0 )


def test_and_gate ( ):
    """Exercise all four input combinations of the AND gate."""
    assert UpperCamelCase(0 , 0 ) == 0
    assert UpperCamelCase(0 , 1 ) == 0
    assert UpperCamelCase(1 , 0 ) == 0
    assert UpperCamelCase(1 , 1 ) == 1


if __name__ == "__main__":
    test_and_gate()
    print(UpperCamelCase(1, 0))
    print(UpperCamelCase(0, 0))
    print(UpperCamelCase(0, 1))
    print(UpperCamelCase(1, 1))
| 633 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Distillation dataset of pre-binarized token-id sequences.

    Cleans the data on construction: splits over-long sequences, drops
    near-empty ones and ones dominated by unknown tokens, then logs statistics.
    NOTE(review): locals in this block are mangled to `lowercase`, so the
    attributes the methods read (`self.params`, `self.token_ids`,
    `self.lengths`) are never actually bound as written; comments describe the
    apparent intent only.
    """

    def __init__( self ,A__ ,A__):
        lowercase = params
        lowercase = np.array(A__)
        lowercase = np.array([len(A__) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__( self ,A__):
        return (self.token_ids[index], self.lengths[index])

    def __len__( self):
        return len(self.lengths)

    # Consistency check between token arrays and cached lengths.
    def A__ ( self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    # Split sequences longer than the model's max input size into chunks,
    # re-adding cls/sep (MLM) or bos/eos (CLM) delimiters around each chunk.
    def A__ ( self):
        lowercase = self.params.max_model_input_size
        lowercase = self.lengths > max_len
        logger.info(f'Splitting {sum(A__)} too long sequences.')

        def divide_chunks(A__ ,A__):
            return [l[i : i + n] for i in range(0 ,len(A__) ,A__)]

        lowercase = []
        lowercase = []
        if self.params.mlm:
            lowercase , lowercase = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
        else:
            lowercase , lowercase = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
        for seq_, len_ in zip(self.token_ids ,self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                lowercase = []
                # chunk size leaves room for the two re-added delimiters
                for sub_s in divide_chunks(seq_ ,max_len - 2):
                    if sub_s[0] != cls_id:
                        lowercase = np.insert(A__ ,0 ,A__)
                    if sub_s[-1] != sep_id:
                        lowercase = np.insert(A__ ,len(A__) ,A__)
                    assert len(A__) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(A__)
                new_tok_ids.extend(A__)
                new_lengths.extend([len(A__) for l in sub_seqs])
        lowercase = np.array(A__)
        lowercase = np.array(A__)

    # Drop sequences of 11 tokens or fewer.
    def A__ ( self):
        lowercase = len(self)
        lowercase = self.lengths > 1_1
        lowercase = self.token_ids[indices]
        lowercase = self.lengths[indices]
        lowercase = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')

    # Drop sequences where at least 50% of the tokens are the unknown token.
    def A__ ( self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            lowercase = self.params.special_tok_ids['''unk_token''']
        lowercase = len(self)
        lowercase = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        lowercase = (unk_occs / self.lengths) < 0.5
        lowercase = self.token_ids[indices]
        lowercase = self.lengths[indices]
        lowercase = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')

    # Master-process-only summary logging.
    def A__ ( self):
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    # DataLoader collate_fn: pad a batch to its longest sequence and return
    # (token-id tensor, lengths tensor).
    def A__ ( self ,A__):
        lowercase = [t[0] for t in batch]
        lowercase = [t[1] for t in batch]
        assert len(A__) == len(A__)
        # Max for paddings
        lowercase = max(A__)
        # Pad token ids
        if self.params.mlm:
            lowercase = self.params.special_tok_ids['''pad_token''']
        else:
            lowercase = self.params.special_tok_ids['''unk_token''']
        lowercase = [list(t.astype(A__)) + [pad_idx] * (max_seq_len_ - len(A__)) for t in token_ids]
        assert len(tk_) == len(A__)
        assert all(len(A__) == max_seq_len_ for t in tk_)
        lowercase = torch.tensor(tk_) # (bs, max_seq_len_)
        lowercase = torch.tensor(A__) # (bs)
        return tk_t, lg_t
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ :Tuple = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowercase__ :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
    """Slow integration test for the TF XLM-RoBERTa base checkpoint.

    NOTE(review): locals are mangled to `lowercase`, so `model`/`output`/
    `expected_slice` etc. are unresolved as written; `tf.intaa`/`tf.floataa`
    appear to be mangled integer/float dtypes — confirm against history.
    """

    @slow
    def A__ ( self):
        # Run the pretrained checkpoint on a tiny input and compare both the
        # output shape and a 3x3 value slice against golden numbers.
        lowercase = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''')
        lowercase = {
            '''input_ids''': tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] ,dtype=tf.intaa), # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] ,dtype=tf.intaa),
        }
        lowercase = model(A__)['''last_hidden_state''']
        lowercase = tf.TensorShape((1, 6, 7_6_8))
        self.assertEqual(output.shape ,A__)
        # compare the actual values for a slice.
        lowercase = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ] ,dtype=tf.floataa ,)
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4))
| 702 |
import logging
from transformers import PretrainedConfig
lowercase__ :int = logging.getLogger(__name__)
lowercase__ :Dict = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Configuration for the BertAbs extractive/abstractive summarization model.

    Holds vocabulary size, maximum positions, and the encoder/decoder stack
    hyper-parameters (layer count, hidden size, attention heads, feed-forward
    size, dropout). Fixes over the mangled original: `__init__` declared twelve
    parameters all named `A__` (a SyntaxError) and bound every value to a local
    instead of an attribute; parameter names are restored from the attribute
    names the original body read, defaults unchanged.
    """

    lowercase_ : Optional[int] ='''bertabs'''

    def __init__( self ,vocab_size=3_0_5_2_2 ,max_pos=5_1_2 ,enc_layers=6 ,enc_hidden_size=5_1_2 ,enc_heads=8 ,enc_ff_size=5_1_2 ,enc_dropout=0.2 ,dec_layers=6 ,dec_hidden_size=7_6_8 ,dec_heads=8 ,dec_ff_size=2_0_4_8 ,dec_dropout=0.2 ,**A__ ,):
        super().__init__(**A__)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        # encoder stack
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        # decoder stack
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 633 | 0 |
from math import factorial
def UpperCamelCase ( lowerCAmelCase__ = 100 ):
'''simple docstring'''
return sum(int(lowerCAmelCase__ ) for x in str(factorial(lowerCAmelCase__ ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 703 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 633 | 0 |
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
    """De-duplicate and sort the model-doc table-of-contents entries.

    Takes a list of ``{"local": ..., "title": ...}`` dicts, collapses entries
    sharing a ``local`` key (raising if their titles disagree), and returns the
    result sorted case-insensitively by title.

    Raises:
        ValueError: if one duplicated key appears with several *Title* values.

    Fixes over the mangled original: `counts` was built as
    `defaultdict(lowerCAmelCase__)` (the input list) instead of `defaultdict(int)`,
    and `model_doc`/`duplicates`/`new_doc`/`titles` plus the sort lambda's `s`
    were all undefined.
    """
    counts = defaultdict(int )
    for doc in lowerCAmelCase__:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in lowerCAmelCase__ if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )

    # Add none duplicate-keys
    new_doc.extend([doc for doc in lowerCAmelCase__ if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s: s["title"].lower() )
def UpperCamelCase ( lowerCAmelCase__=False ):
    '''Check (and, with overwrite=True, rewrite) the sort order of the "Models"
    section of the documentation table of contents.

    NOTE(review): every local here is mangled to `lowercase`, so `content`,
    `api_doc`, `model_doc`, `diff`, `overwrite` etc. are unresolved as written;
    the file opened is presumably the module-level TOC path constant, not the
    boolean parameter — confirm against history.
    '''
    with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
        lowercase = yaml.safe_load(f.read() )
    # Get to the API doc
    lowercase = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    lowercase = content[api_idx]['''sections''']
    # Then to the model doc
    lowercase = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    lowercase = api_doc[model_idx]['''sections''']
    # modality subsections are the entries that themselves contain "sections"
    lowercase = [(idx, section) for idx, section in enumerate(lowerCAmelCase__ ) if '''sections''' in section]
    lowercase = False
    for idx, modality_doc in modalities_docs:
        lowercase = modality_doc['''sections''']
        lowercase = clean_model_doc_toc(lowerCAmelCase__ )
        if old_modality_doc != new_modality_doc:
            lowercase = True
            if overwrite:
                lowercase = new_modality_doc
    if diff:
        if overwrite:
            # write the cleaned structure back into the TOC file
            lowercase = model_doc
            lowercase = api_doc
            with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )


if __name__ == "__main__":
    # NOTE(review): parser/args are bound to `lowercase__` but read back as
    # `parser`/`args`, and `check_model_doc` is unresolved under the mangling.
    lowercase__ :Any = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    lowercase__ :int = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 704 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase ( shape , vocab_size , rng=None ):
    """Return an int32 numpy array of the given shape filled with uniformly
    random token ids in ``[0, vocab_size)``.

    A ``random.Random`` instance may be supplied for reproducibility; a fresh
    one is created otherwise. Fixes over the mangled original: all three
    parameters shared one name (a SyntaxError), `rng`/`total_dims`/`values`
    were undefined, and the dtype `jnp.intaa` is restored to ``jnp.int32``.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )

    return np.array(values , dtype=jnp.int32 ).reshape(shape )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ):
    '''Build a random 0/1 attention mask of the given shape.

    NOTE(review): the two parameters share one mangled name (a SyntaxError as
    written) — presumably (shape, rng) — and `ids_tensor`/`attn_mask` are
    unresolved under the mangling; confirm against history.
    '''
    lowercase = ids_tensor(lowerCAmelCase__ , vocab_size=2 , rng=lowerCAmelCase__ )
    # make sure that at least one token is attended to for each batch
    lowercase = 1
    return attn_mask
@require_flax
class lowercase :
lowercase_ : Any =None
lowercase_ : List[str] =()
# Trim the tester's common inputs to batch size <= 2 and half length, and derive
# a max generation length (input length + 5). NOTE(review): every local here is
# mangled to `lowercase`, so `inputs`/`config`/`input_ids` etc. are unresolved
# as written.
def A__ ( self):
    lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
    # cut to half length & take max batch_size 3
    lowercase = 2
    lowercase = inputs['''input_ids'''].shape[-1] // 2
    lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
    lowercase = jnp.ones_like(A__)
    lowercase = attention_mask[:max_batch_size, :sequence_length]
    # generate max 5 tokens
    lowercase = input_ids.shape[-1] + 5
    if config.eos_token_id is not None and config.pad_token_id is None:
        # hack to allow generate for models such as GPT2 as is done in `generate()`
        lowercase = config.eos_token_id
    return config, input_ids, attention_mask, max_length
# Cross-framework check: greedy generation from each Flax model must match the
# PyTorch port after transferring the Flax weights. NOTE(review): locals are
# mangled to `lowercase`, so the intermediate model/output names are unresolved
# as written.
@is_pt_flax_cross_test
def A__ ( self):
    lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
    lowercase = False
    lowercase = max_length
    lowercase = 0
    for model_class in self.all_generative_model_classes:
        lowercase = model_class(A__)
        lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
        lowercase = getattr(A__ ,A__)
        lowercase = pt_model_class(A__).eval()
        lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
        lowercase = flax_model.generate(A__).sequences
        lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
        # torch may generate fewer tokens; compare on the common prefix
        if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
            lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
        self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())
# Greedy generation: the output length must equal max_length and the jitted
# generate must agree with the eager call. NOTE(review): locals are mangled to
# `lowercase` throughout these test methods.
def A__ ( self):
    lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
    lowercase = False
    lowercase = max_length
    for model_class in self.all_generative_model_classes:
        lowercase = model_class(A__)
        lowercase = model.generate(A__).sequences
        self.assertEqual(generation_outputs.shape[-1] ,A__)
        lowercase = jit(model.generate)
        lowercase = jit_generate(A__).sequences
        self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

# Sampled-generation variant of the same jit-vs-eager check.
def A__ ( self):
    lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
    lowercase = True
    lowercase = max_length
    for model_class in self.all_generative_model_classes:
        lowercase = model_class(A__)
        lowercase = model.generate(A__).sequences
        self.assertEqual(generation_outputs.shape[-1] ,A__)
        lowercase = jit(model.generate)
        lowercase = jit_generate(A__).sequences
        self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def test_beam_search_generate(self):
    """Beam-search generation (2 beams): length check plus eager/jit equivalence."""
    config, input_ids, _, max_length = self._get_input_ids_and_config()
    config.do_sample = False
    config.max_length = max_length
    config.num_beams = 2

    for model_class in self.all_generative_model_classes:
        model = model_class(config)

        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[-1], max_length)

        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids).sequences
        self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_beam_search_generate_num_return_sequences(self):
    """Beam search with `num_return_sequences=2`: batch dim grows by that factor."""
    config, input_ids, _, max_length = self._get_input_ids_and_config()
    config.do_sample = False
    config.max_length = max_length
    config.num_beams = 2
    config.num_return_sequences = 2

    for model_class in self.all_generative_model_classes:
        model = model_class(config)

        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
def test_sample_generate_logits_warper(self):
    """Sampling with logits warpers/processors (temperature, top-k, top-p, forced tokens)."""
    config, input_ids, _, max_length = self._get_input_ids_and_config()
    config.do_sample = True
    config.max_length = max_length
    config.temperature = 0.8
    config.top_k = 10
    config.top_p = 0.3
    config.min_length = 1
    config.forced_bos_token_id = 8
    config.forced_eos_token_id = 9

    for model_class in self.all_generative_model_classes:
        model = model_class(config)

        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[-1], max_length)

        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids).sequences
        self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_greedy_generate_logits_warper(self):
    """Greedy generation with min-length and forced BOS/EOS logits processors."""
    config, input_ids, _, max_length = self._get_input_ids_and_config()
    config.max_length = max_length
    config.min_length = 1
    config.forced_bos_token_id = 8
    config.forced_eos_token_id = 9

    for model_class in self.all_generative_model_classes:
        model = model_class(config)

        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[-1], max_length)

        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids).sequences
        self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_beam_search_generate_logits_warper(self):
    """Beam search with min-length and forced BOS/EOS logits processors."""
    config, input_ids, _, max_length = self._get_input_ids_and_config()
    config.max_length = max_length
    config.num_beams = 2
    config.min_length = 1
    config.forced_bos_token_id = 8
    config.forced_eos_token_id = 9

    for model_class in self.all_generative_model_classes:
        model = model_class(config)

        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[-1], max_length)

        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids).sequences
        self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_greedy_generate_attn_mask(self):
    """Greedy generation with an attention mask that masks the first (padded-left) token."""
    config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
    # pad attention mask on the left
    attention_mask = attention_mask.at[(0, 0)].set(0)
    config.do_sample = False
    config.max_length = max_length

    for model_class in self.all_generative_model_classes:
        model = model_class(config)

        generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
        self.assertEqual(generation_outputs.shape[-1], max_length)

        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
        self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_sample_generate_attn_mask(self):
    """Sampling generation with an attention mask that masks the first (padded-left) token."""
    config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
    # pad attention mask on the left
    attention_mask = attention_mask.at[(0, 0)].set(0)
    config.do_sample = True
    config.max_length = max_length

    for model_class in self.all_generative_model_classes:
        model = model_class(config)

        generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
        self.assertEqual(generation_outputs.shape[-1], max_length)

        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
        self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_beam_search_generate_attn_mask(self):
    """Beam-search generation with an attention mask that masks the first (padded-left) token."""
    config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
    # pad attention mask on the left
    attention_mask = attention_mask.at[(0, 0)].set(0)
    config.num_beams = 2
    config.max_length = max_length

    for model_class in self.all_generative_model_classes:
        model = model_class(config)

        generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
        self.assertEqual(generation_outputs.shape[-1], max_length)

        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
        self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    """Integration checks for `generate()` argument validation."""

    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 633 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Number of spectrogram frames processed per chunk; referenced throughout the
# pipeline below (both names were mangled while the code below uses them).
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    """Generates mel spectrograms from note tokens with a DDPM loop, optionally vocoding to audio via MelGAN.

    NOTE(review): identifiers restored from mangled placeholders; the base class
    placeholder `SCREAMING_SNAKE_CASE__` was undefined — `DiffusionPipeline` is
    the import this module provides for it.
    """

    # MelGAN is only required for the "numpy" output path.
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan,
    ):
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly rescale `features` from [min_value, max_value] to `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert `scale_features`: map network outputs back to feature range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Encode note tokens and the previous spectrogram chunk; returns [(enc, mask), (enc, mask)]."""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """Run one denoising step of the decoder at diffusion time `noise_time`."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate a full spectrogram chunk-by-chunk, conditioning each chunk on the previous one.

        Raises:
            ValueError: if `callback_steps` is not a positive integer, or the
                requested output type cannot be produced.
        """
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    """Scheduler tests for `UniPCMultistepScheduler`.

    NOTE(review): identifiers restored from mangled placeholders; the base class
    placeholder `SCREAMING_SNAKE_CASE__` was undefined — `SchedulerCommonTest`
    is the only test base imported by this module.
    """

    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, overridden by `kwargs`."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and check stepping produces identical outputs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Save/reload the scheduler and check a single forward step is identical."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a full 10-step denoising loop and return the final sample."""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        # NOTE(review): this unconditional rebuild discards any scheduler passed
        # in above — it mirrors the original statement order; confirm upstream
        # before removing.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE(review): the environ target was mangled away, leaving a dead string
    # assignment; `XLA_PYTHON_CLIENT_ALLOCATOR` is the JAX setting this comment
    # describes — confirm against upstream.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """Builds configs/inputs for Flax Pegasus tests and checks cached decoding.

    NOTE(review): identifiers restored from mangled placeholders; this class is
    referenced by name (`FlaxPegasusModelTester`) in the test class below.
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with an EOS appended to every sequence."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Cached (incremental) decoding must match uncached decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as `check_use_cache_forward` but with an explicit attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build the model input dict, deriving padding masks from `config.pad_token_id` when absent.

    The decoder mask always marks the first position as attended (it is the
    decoder start token, which may equal the pad id).
    NOTE(review): the original def repeated one mangled parameter name (a
    SyntaxError) while being called as `prepare_pegasus_inputs_dict` above.
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Model tests for Flax Pegasus.

    NOTE(review): identifiers restored from mangled placeholders; the mixin
    placeholder `SCREAMING_SNAKE_CASE__` was undefined — `FlaxModelTesterMixin`
    is the mixin this module imports.
    """

    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        """Jitted encoding must produce outputs with the same shapes as eager encoding."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        """Jitted decoding must produce outputs with the same shapes as eager decoding."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''')
        tokenizer = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''')

        src_text = [
            ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
            ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
        ]

        tgt_text = [
            '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
            '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
        ]

        inputs = tokenizer(src_text, return_tensors='''np''', truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 706 |
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Deduplicate and sort (case-insensitively by title) a list of TOC entries.

    Args:
        model_doc: list of dicts with at least "local" and "title" keys.

    Returns:
        A new, sorted list with one entry per "local" key.

    Raises:
        ValueError: if a duplicated "local" key appears with different titles.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    """Check (and optionally fix) the sorting of the model TOC in `PATH_TO_TOC`.

    Args:
        overwrite: when True, rewrite the TOC file in place; when False, raise
            if any modality section is not properly sorted/deduplicated.

    Raises:
        ValueError: if the TOC is out of order and `overwrite` is False.

    NOTE(review): names restored from mangled placeholders — the original
    opened its boolean parameter as a file path.
    """
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    # Script entry point: names restored from mangled placeholders (the
    # original never bound `parser`/`args`, and `check_model_doc` was unbound).
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
| 633 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config URL.
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    """Configuration for NLLB-MoE models.

    NOTE(review): the original `__init__` repeated one mangled parameter name
    for every argument (a SyntaxError) and assigned every value to the same
    local; parameter names/defaults restored from the visible assignment order
    and the surrounding comments — confirm against upstream.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        # Mixture-of-experts routing settings.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 707 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase ( func , a , precision = 10**-10 ):
    """Find a root of ``func`` — a string expression in the variable ``x`` —
    with the Newton-Raphson method, starting from the initial guess ``a``.

    The mangled original declared all three parameters with the same name (a
    SyntaxError); they are restored here.  The iterate MUST be a local named
    ``x`` because both ``func`` and its sympy derivative are eval()'d
    against the enclosing scope.

    SECURITY: ``eval`` executes the caller-supplied expression — only pass
    trusted input.
    """
    x = a
    while True:
        # x_{n+1} = x_n - f(x_n) / f'(x_n), with Decimal for extra precision.
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
    # The function directly above is this file's Newton-Raphson solver (its
    # original name was mangled away), so call it by its current name.
    # Find root of trigonometric function
    # Find value of pi
    print(F'The root of sin(x) = 0 is {UpperCamelCase("sin(x)", 2)}')
    # Find root of polynomial
    print(F'The root of x**2 - 5*x + 2 = 0 is {UpperCamelCase("x**2 - 5*x + 2", 0.4)}')
    # Find Square Root of 5
    print(F'The root of log(x) - 1 = 0 is {UpperCamelCase("log(x) - 1", 2)}')
    # Exponential Roots
    print(F'The root of exp(x) - 1 = 0 is {UpperCamelCase("exp(x) - 1", 0)}')
| 633 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowercase :
    """Builds tiny CTRL configs and dummy inputs for the model tests below.

    NOTE(review): name-mangling broke this class — ``__init__`` and the
    ``create_and_check_*`` methods declare several parameters with the same
    name ``A__`` (duplicate argument names are a SyntaxError) and assignment
    targets were rewritten to ``lowercase`` while the reads keep the original
    names (``parent``, ``model``, ``result``, ``config_and_inputs``, ...).
    Documented as-is; restore the real names from upstream before running.
    """

    def __init__( self ,A__ ,A__=1_4 ,A__=7 ,A__=True ,A__=True ,A__=True ,A__=True ,A__=True ,A__=9_9 ,A__=3_2 ,A__=5 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=1_6 ,A__=2 ,A__=0.02 ,A__=3 ,A__=4 ,A__=None ,):
        # Cache every hyper-parameter of the tiny test model.
        lowercase = parent
        lowercase = batch_size
        lowercase = seq_length
        lowercase = is_training
        lowercase = use_token_type_ids
        lowercase = use_input_mask
        lowercase = use_labels
        lowercase = use_mc_token_ids
        lowercase = vocab_size
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = intermediate_size
        lowercase = hidden_act
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = max_position_embeddings
        lowercase = type_vocab_size
        lowercase = type_sequence_label_size
        lowercase = initializer_range
        lowercase = num_labels
        lowercase = num_choices
        lowercase = scope
        lowercase = self.vocab_size - 1  # pad token id

    # prepare_config_and_inputs: random ids/masks/labels plus a config.
    def A__ ( self):
        lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
        lowercase = None
        if self.use_input_mask:
            lowercase = random_attention_mask([self.batch_size, self.seq_length])
        lowercase = None
        if self.use_token_type_ids:
            lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
        lowercase = None
        if self.use_mc_token_ids:
            lowercase = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length)
        lowercase = None
        lowercase = None
        lowercase = None
        if self.use_labels:
            lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
            lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
            lowercase = ids_tensor([self.batch_size] ,self.num_choices)
        lowercase = self.get_config()
        lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    # get_config: a tiny CTRLConfig matching the cached hyper-parameters.
    def A__ ( self):
        return CTRLConfig(
            vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)

    # create_and_check_ctrl_model: shape checks on the base model.
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,*A__):
        lowercase = CTRLModel(config=A__)
        model.to(A__)
        model.eval()
        model(A__ ,token_type_ids=A__ ,head_mask=A__)
        model(A__ ,token_type_ids=A__)
        lowercase = model(A__)
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values) ,config.n_layer)

    # create_and_check_lm_head_model: loss and logits shape checks.
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,*A__):
        lowercase = CTRLLMHeadModel(A__)
        model.to(A__)
        model.eval()
        lowercase = model(A__ ,token_type_ids=A__ ,labels=A__)
        self.parent.assertEqual(result.loss.shape ,())
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))

    # prepare_config_and_inputs_for_common: unpack into the mixin's dict form.
    def A__ ( self):
        lowercase = self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) = config_and_inputs
        lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict

    # create_and_check_ctrl_for_sequence_classification: logits shape check.
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,*A__):
        lowercase = self.num_labels
        lowercase = CTRLForSequenceClassification(A__)
        model.to(A__)
        model.eval()
        lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
        lowercase = model(A__ ,token_type_ids=A__ ,labels=A__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Common-mixin test suite for the tiny CTRL models.

    NOTE(review): mangling broke this class — the pipeline-skip hook declares
    five parameters all named ``A__`` (SyntaxError), class constants were
    bound to throwaway ``lowercase_`` names, and setUp's targets were
    rewritten while later reads expect ``self.model_tester`` /
    ``self.config_tester``.  Documented as-is.
    """

    # all_model_classes / all_generative_model_classes / pipeline mapping (mangled bindings).
    lowercase_ : Optional[int] =(CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    lowercase_ : List[Any] =(CTRLLMHeadModel,) if is_torch_available() else ()
    lowercase_ : Union[str, Any] =(
        {
            '''feature-extraction''': CTRLModel,
            '''text-classification''': CTRLForSequenceClassification,
            '''text-generation''': CTRLLMHeadModel,
            '''zero-shot''': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowercase_ : Optional[Any] =True
    lowercase_ : List[str] =False
    lowercase_ : Optional[int] =False

    # is_pipeline_test_to_skip hook.
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    # setUp: build the model tester and a config tester.
    def A__ ( self):
        lowercase = CTRLModelTester(self)
        lowercase = ConfigTester(self ,config_class=A__ ,n_embd=3_7)

    # tearDown: release GPU memory between tests.
    def A__ ( self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def A__ ( self):
        self.config_tester.run_common_tests()

    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*A__)

    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*A__)

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def A__ ( self):
        pass

    @slow
    def A__ ( self):
        # Smoke-test loading the first pretrained checkpoint.
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase = CTRLModel.from_pretrained(A__)
            self.assertIsNotNone(A__)

    @unittest.skip('''The model doesn\'t support left padding''') # and it's not used enough to be worth fixing :)
    def A__ ( self):
        pass
@require_torch
class lowercase ( unittest.TestCase ):
    """Slow integration test: greedy generation with the pretrained ``ctrl`` model.

    NOTE(review): assignment targets below were mangled to ``lowercase`` while
    later reads use the original names (``model``, ``output_ids``), and ``A__``
    is read where a device / input tensor / expected list / False was expected
    — NameError at runtime as written.
    """

    # tearDown: release GPU memory between tests.
    def A__ ( self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def A__ ( self):
        lowercase = CTRLLMHeadModel.from_pretrained('''ctrl''')
        model.to(A__)
        lowercase = torch.tensor(
            [[1_1_8_5_9, 0, 1_6_1_1, 8]] ,dtype=torch.long ,device=A__) # Legal the president is
        lowercase = [
            1_1_8_5_9,
            0,
            1_6_1_1,
            8,
            5,
            1_5_0,
            2_6_4_4_9,
            2,
            1_9,
            3_4_8,
            4_6_9,
            3,
            2_5_9_5,
            4_8,
            2_0_7_4_0,
            2_4_6_5_3_3,
            2_4_6_5_3_3,
            1_9,
            3_0,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        lowercase = model.generate(A__ ,do_sample=A__)
        self.assertListEqual(output_ids[0].tolist() ,A__)
| 708 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase ( lowerCAmelCase__ ):
    """Convert an RGB image array (H, W, 3) to grayscale via ITU-R 601 luma weights.

    Fix: the mangled original unpacked the channels into throwaway names and
    then read ``r``/``g``/``b`` (NameError); the channel bindings are restored.
    """
    rgb = lowerCAmelCase__
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def UpperCamelCase ( lowerCAmelCase__ ):
    """Threshold a grayscale array to a boolean mask: True where 127 < value <= 255.

    Fix: the mangled original read ``gray`` while the parameter kept another
    name (NameError); a local restores consistency.
    """
    gray = lowerCAmelCase__
    return (gray > 127) & (gray <= 255)
def UpperCamelCase ( image , kernel ):
    """Morphological dilation of a binary image with a structuring element.

    Fixes three mangling defects: both parameters were declared with the same
    name (SyntaxError), the slice copying the image into the padded buffer was
    dropped, and the ``output[y, x]`` store was lost.  Returns a 0/1 array of
    the same shape as ``image``.
    """
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            # Dilation: any overlap between kernel and foreground sets the pixel.
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
    # read original image
    lowercase__ :str = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    # NOTE(review): reads `lena_path`, but the assignment above was mangled
    # to a throwaway name — NameError at runtime as written.
    lowercase__ :List[str] = np.array(Image.open(lena_path))
    # kernel to be applied (3x3 cross structuring element)
    lowercase__ :Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    # NOTE(review): `dilation`/`gray_to_binary`/`rgb_to_gray` are the original
    # names of the helpers above, all renamed to one shared name — unresolved here.
    lowercase__ :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    lowercase__ :str = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 633 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """T5-style encoder over note tokens producing spectrogram conditioning.

    NOTE(review): reconstruction — the mangled original declared duplicate
    ``A__`` parameters in both methods (a SyntaxError) and bound every module
    to a throwaway name.  Parameter names and the ``forward`` method name are
    recovered from the reads in the surviving body and the standard T5
    encoder layout; confirm against upstream before shipping.
    """

    @register_to_config
    def __init__( self ,max_length ,vocab_size ,d_model ,dropout_rate ,num_layers ,num_heads ,d_kv ,d_ff ,feed_forward_proj ,is_decoder = False ,):
        super().__init__()
        # Token embedding plus a fixed (non-trainable) absolute position embedding.
        self.token_embedder = nn.Embedding(vocab_size ,d_model)
        self.position_encoding = nn.Embedding(max_length ,d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t_config = TaConfig(
            vocab_size=vocab_size ,d_model=d_model ,num_heads=num_heads ,d_kv=d_kv ,d_ff=d_ff ,dropout_rate=dropout_rate ,feed_forward_proj=feed_forward_proj ,is_decoder=is_decoder ,is_encoder_decoder=False ,)
        # Stack of T5 encoder blocks.
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t_config)
            self.encoders.append(lyr)
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward( self ,encoder_input_tokens ,encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length ,device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask (additive form expected by the T5 blocks)
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask ,input_shape)
        for lyr in self.encoders:
            x = lyr(x ,extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 709 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase ( lowerCAmelCase__ ):
    """Drop fairseq bookkeeping keys (in place) that have no HF equivalent.

    Fix: the mangled original popped the state dict itself as the key (dicts
    are unhashable — TypeError) instead of each ignore key with a ``None``
    default.  Mutates the dict in place; returns None.
    """
    state_dict = lowerCAmelCase__
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        # default None: missing keys are fine, no KeyError.
        state_dict.pop(k , None )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Build a bias-free nn.Linear that shares the weight tensor of an embedding.

    Fix: the mangled original passed the embedding module itself as every
    nn.Linear argument; the unpacked dimensions and ``bias=False`` are restored.
    """
    emb = lowerCAmelCase__
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Share (not copy) the embedding weights with the projection.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def UpperCamelCase ( lowerCAmelCase__ ):
    """Convert a fairseq M2M-100 ``model.pt`` checkpoint into an HF model.

    Restores the local names the mangled body read.  NOTE(review): the
    helpers ``remove_ignore_keys_`` / ``make_linear_from_emb`` are referenced
    by their original names, which this module no longer binds after the
    renames — restore the function names file-wide before running.
    """
    checkpoint_path = lowerCAmelCase__
    mam_aaa = torch.load(checkpoint_path , map_location='''cpu''' )
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    # Drop fairseq bookkeeping entries before loading.
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
    # The decoder input embedding doubles as the shared embedding table.
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config )
    # strict=False: the ported state dict intentionally omits some HF buffers.
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    # Restored bindings (the mangled version assigned to throwaway names and
    # then read `parser`/`args` — NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # Fix: the original line read `args.fairseq_pathß` (stray ß character) and
    # called a name this file no longer defines; call the converter above.
    model = UpperCamelCase(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
# Emit INFO-level logs during conversion.
logging.set_verbosity_info()
# Module logger (bound to a mangled name; nothing below reads it).
lowercase__ :int = logging.get_logger(__name__)
def UpperCamelCase ( checkpoint_repo , pytorch_dump_folder_path ):
    """Port a RoBERTa-PreLayerNorm checkpoint from the Hub into HF format.

    The mangled def line declared both parameters with one name (a
    SyntaxError) and assignment targets were rewritten while the reads kept
    the original names; both are restored here from the reads in the body.
    """
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=['''RobertaPreLayerNormForMaskedLM'''] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename='''pytorch_model.bin''' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith('''roberta.''' ):
            tensor_key = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Restored bindings (the mangled version read `parser`/`args` that were
    # never bound) and call of the converter defined directly above.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint-repo""",
        default=None,
        type=str,
        required=True,
        help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    UpperCamelCase(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 710 |
from __future__ import annotations
from random import random
class lowercase :
    """A treap node: ordered as a BST by ``value``, as a max-heap by the
    random priority ``prior``.

    Fix: the mangled original bound the constructor/``__str__`` locals to
    throwaway names and then read the real ones (NameError); the intended
    attribute bindings are restored.
    """

    def __init__( self ,A__ = None):
        self.value = A__
        self.prior = random()  # heap priority drawn uniformly from [0, 1)
        self.left = None
        self.right = None

    def __repr__( self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat(
                {f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1)

    def __str__( self):
        # Pre-order rendering: this value, then the left and right subtrees.
        value = str(self.value) + ''' '''
        left = str(self.left or '''''')
        right = str(self.right or '''''')
        return value + left + right
def UpperCamelCase ( root , value ):
    """Split the treap at ``value``: return (left, right) where ``left`` holds
    the nodes with value <= ``value`` and ``right`` the rest.

    The mangled def line declared both parameters with the same name (a
    SyntaxError) and dropped the subtree re-link targets; both are restored.
    NOTE(review): the recursive calls use the original name ``split``, which
    this module no longer binds after the file-wide renames.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # split point lies in the left subtree; root stays on the right
            left , root.left = split(root.left , value )
            return left, root
        else:
            # split point lies in the right subtree; root stays on the left
            root.right , right = split(root.right , value )
            return root, right
def UpperCamelCase ( left , right ):
    """Merge two treaps (every value in ``left`` <= every value in ``right``)
    into one, keeping the heap property on ``prior``.

    The mangled def line declared both parameters with the same name (a
    SyntaxError) and dropped the subtree re-link targets; both are restored.
    NOTE(review): the recursive calls use the original name ``merge``, which
    this module no longer binds after the file-wide renames.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def UpperCamelCase ( root , value ):
    """Insert ``value`` into the treap: split at ``value``, then merge
    left + new node + right.  Returns the new root.

    (The mangled def line had duplicate parameter names — a SyntaxError.)
    NOTE(review): ``Node``/``split``/``merge`` are the original sibling names,
    which this module no longer binds after the file-wide renames.
    """
    node = Node(value )
    left , right = split(root , value )
    return merge(merge(left , node ) , right )
def UpperCamelCase ( root , value ):
    """Remove every node equal to ``value`` by splitting it out of the treap.

    (The mangled def line had duplicate parameter names — a SyntaxError.)
    NOTE(review): ``split``/``merge`` are the original sibling names, which
    this module no longer binds after the file-wide renames.
    """
    # left: values <= value-1; right: values > value-1
    left , right = split(root , value - 1 )
    # discard the sub-treap holding exactly `value`
    _ , right = split(right , value )
    return merge(left , right )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Print the treap's values in sorted (in-order) sequence, comma-separated.

    Fix: the mangled body read ``root`` while the parameter kept another name
    (NameError); a local restores consistency.
    NOTE(review): the recursive calls use the original name ``inorder``, which
    this module no longer binds after the file-wide renames.
    """
    root = lowerCAmelCase__
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=''',''' )
        inorder(root.right )
def UpperCamelCase ( root , args ):
    """Apply a whitespace-separated command string to the treap: ``+N``
    inserts N, ``-N`` erases all N; anything else prints an error.
    Returns the (possibly new) root.

    (The mangled def line had duplicate parameter names — a SyntaxError.)
    NOTE(review): ``insert``/``erase`` are the original sibling names, which
    this module no longer binds after the file-wide renames.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root
def UpperCamelCase ( ):
    """Interactive treap driver: read commands (+v / -v / q) and print the tree.

    Restores the local bindings the mangled body read.
    NOTE(review): calls ``interact_treap`` by its original name, which this
    module no longer binds after the file-wide renames.
    """
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('''good by!''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The interactive entry point directly above is currently named UpperCamelCase.
    UpperCamelCase()
| 633 | 0 |
from math import sqrt
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return True iff the given integer is prime (trial division over 6k±1).

    Fix: the mangled body mixed the parameter name with the original
    ``number`` (NameError); a single local restores consistency.
    """
    number = lowerCAmelCase__
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def UpperCamelCase ( lowerCAmelCase__ = 1_0001 ):
    """Return the nth prime number (Project Euler problem 7).

    Restores the local names the mangled body read (``nth``, ``count``,
    ``number``) and the primality-test argument (the mangled version passed
    ``nth`` instead of the current candidate).
    NOTE(review): calls ``is_prime`` by its original name, which this module
    no longer binds after the file-wide renames.
    """
    nth = lowerCAmelCase__
    count = 0
    number = 1
    # Handle 2 (the only even prime) before switching to odd steps.
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
    # NOTE(review): `solution` is the original name of the nth-prime function
    # above (currently mangled to UpperCamelCase) — unresolved as written.
    print(F'{solution() = }')
| 711 |
def UpperCamelCase ( lowerCAmelCase__ = 1000 ):
    """Return the maximum product a*b*c over Pythagorean triplets with
    a + b + c == n (Project Euler problem 9 generalized); -1 if none exist.

    Fix: the mangled body read ``n``, ``b``, ``c``, ``candidate`` and
    ``product`` while every assignment target had been rewritten to a
    throwaway name (NameError); the bindings are restored.
    """
    n = lowerCAmelCase__
    product = -1
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
    # NOTE(review): `solution` is the original name of the function above
    # (currently mangled to UpperCamelCase) — unresolved as written.
    print(F'{solution() = }')
| 633 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
# (restored binding: the worker below reads the global name `process_lock`,
# which the mangled assignment no longer provided)
process_lock = Lock()
def UpperCamelCase ( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    """Worker for the parallel odd-even transposition sort: exchanges its value
    with left/right neighbours over pipes for 10 rounds, then reports the
    final value on ``result_pipe``.

    The mangled def line declared all seven parameters with one name (a
    SyntaxError); the names are restored from the reads in the body.
    """
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Parallel odd-even transposition sort: one process per element,
    neighbours exchange values over pipes; the sorted values are written back
    into the list, which is returned.

    Restores the local names the mangled body read.  NOTE(review): the worker
    is spawned via its original name ``oe_process`` (the mangled version
    passed the *input list* as the Process target), but this module no longer
    binds that name after the file-wide renames — restore the function names
    before running.
    """
    arr = lowerCAmelCase__
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def UpperCamelCase ( ):
    """Demo: sort a reversed list of 10..1 with the parallel algorithm above.

    NOTE(review): calls ``odd_even_transposition`` by its original name, which
    this module no longer binds after the file-wide renames.
    """
    arr = list(range(10 , 0 , -1 ) )
    print('''Initial List''' )
    print(*arr )
    arr = odd_even_transposition(arr )
    print('''Sorted List\n''' )
    print(*arr )


if __name__ == "__main__":
    # The demo entry point directly above is currently named UpperCamelCase.
    UpperCamelCase()
| 712 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazily-importable structure of this subpackage: maps each submodule to the
# public names it exports.  (Restored binding: `_LazyModule` below reads
# `_import_structure`, which the mangled assignments no longer provided.)
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

# The modeling files need torch; register them only when it is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # ...while at runtime the whole module is replaced by a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return True iff every bracket in the string is properly matched and nested.

    Fixes two defects in the mangled original: locals are rebound to the
    names the body reads (``s``, ``stack``, ...), and the short-circuit guard
    now tests the *stack* for emptiness — the old condition tested the input
    string, so an unmatched closing bracket popped an empty stack
    (IndexError).
    """
    s = lowerCAmelCase__
    stack = []
    open_brackets = set({'''(''', '''[''', '''{'''} )
    closed_brackets = set({''')''', ''']''', '''}'''} )
    open_to_closed = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            # empty stack, or the most recent opener does not match this closer
            len(stack ) == 0 or open_to_closed[stack.pop()] != s[i]
        ):
            return False
    # Balanced iff every opener was consumed.
    return len(stack ) == 0
def UpperCamelCase ( ):
    """Interactive driver: read a bracket sequence and report whether it balances.

    NOTE(review): the input was assigned to a mangled name while the checks
    below read ``lowerCAmelCase__`` (not bound in this scope), and
    ``is_balanced`` is the original name of the checker above (renamed) —
    NameError at runtime as written.
    """
    lowercase = input('''Enter sequence of brackets: ''' )
    if is_balanced(lowerCAmelCase__ ):
        print(lowerCAmelCase__ , '''is balanced''' )
    else:
        print(lowerCAmelCase__ , '''is not balanced''' )


if __name__ == "__main__":
    # NOTE(review): `main` is the original name of the driver above
    # (currently mangled to UpperCamelCase) — unresolved as written.
    main()
| 713 |
from numpy import exp, pi, sqrt
def UpperCamelCase ( x , mu = 0.0 , sigma = 1.0 ):
    """Gaussian probability density at ``x`` with mean ``mu`` and standard
    deviation ``sigma``.

    The mangled def line declared all three parameters with one name (a
    SyntaxError); the names are restored from the reads in the body.
    """
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
    # Run the module's doctests (none are currently defined in this chunk).
    import doctest

    doctest.testmod()
| 633 | 0 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
    """Builds tiny Nystromformer configs and dummy inputs for the tests below.

    NOTE(review): name-mangling broke this class — ``__init__`` and the
    ``create_and_check_*`` methods declare several parameters with the same
    name ``A__`` (duplicate argument names are a SyntaxError) and assignment
    targets were rewritten to ``lowercase`` while the reads keep the original
    names (``parent``, ``model``, ``result``, ``config_and_inputs``, ...).
    Documented as-is; restore the real names from upstream before running.
    """

    def __init__( self ,A__ ,A__=1_3 ,A__=7 ,A__=True ,A__=True ,A__=True ,A__=True ,A__=9_9 ,A__=3_2 ,A__=5 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=1_6 ,A__=2 ,A__=0.02 ,A__=3 ,A__=4 ,A__=None ,):
        # Cache every hyper-parameter of the tiny test model.
        lowercase = parent
        lowercase = batch_size
        lowercase = seq_length
        lowercase = is_training
        lowercase = use_input_mask
        lowercase = use_token_type_ids
        lowercase = use_labels
        lowercase = vocab_size
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = intermediate_size
        lowercase = hidden_act
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = max_position_embeddings
        lowercase = type_vocab_size
        lowercase = type_sequence_label_size
        lowercase = initializer_range
        lowercase = num_labels
        lowercase = num_choices
        lowercase = scope

    # prepare_config_and_inputs: random ids/masks/labels plus a config.
    def A__ ( self):
        lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
        lowercase = None
        if self.use_input_mask:
            lowercase = random_attention_mask([self.batch_size, self.seq_length])
        lowercase = None
        if self.use_token_type_ids:
            lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
        lowercase = None
        lowercase = None
        lowercase = None
        if self.use_labels:
            lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
            lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
            lowercase = ids_tensor([self.batch_size] ,self.num_choices)
        lowercase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # get_config: a tiny NystromformerConfig matching the cached hyper-parameters.
    def A__ ( self):
        return NystromformerConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A__ ,initializer_range=self.initializer_range ,)

    # create_and_check_model: shape check on the base model output.
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
        lowercase = NystromformerModel(config=A__)
        model.to(A__)
        model.eval()
        lowercase = model(A__ ,attention_mask=A__ ,token_type_ids=A__)
        lowercase = model(A__ ,token_type_ids=A__)
        lowercase = model(A__)
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))

    # create_and_check_for_masked_lm: logits shape check.
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
        lowercase = NystromformerForMaskedLM(config=A__)
        model.to(A__)
        model.eval()
        lowercase = model(A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))

    # create_and_check_for_question_answering: start/end logits shape checks.
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
        lowercase = NystromformerForQuestionAnswering(config=A__)
        model.to(A__)
        model.eval()
        lowercase = model(
            A__ ,attention_mask=A__ ,token_type_ids=A__ ,start_positions=A__ ,end_positions=A__ ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length))

    # create_and_check_for_sequence_classification: logits shape check.
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
        lowercase = self.num_labels
        lowercase = NystromformerForSequenceClassification(A__)
        model.to(A__)
        model.eval()
        lowercase = model(A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))

    # create_and_check_for_token_classification: logits shape check.
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
        lowercase = self.num_labels
        lowercase = NystromformerForTokenClassification(config=A__)
        model.to(A__)
        model.eval()
        lowercase = model(A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels))

    # create_and_check_for_multiple_choice: logits shape check.
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
        lowercase = self.num_choices
        lowercase = NystromformerForMultipleChoice(config=A__)
        model.to(A__)
        model.eval()
        lowercase = input_ids.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
        lowercase = token_type_ids.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
        lowercase = input_mask.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
        lowercase = model(
            A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__ ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices))

    # prepare_config_and_inputs_for_common: unpack into the mixin's dict form.
    def A__ ( self):
        lowercase = self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) = config_and_inputs
        lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Nystromformer test suite: config sanity checks plus one test per task head.

    NOTE(review): this file was mechanically renamed — every method below is
    called ``A__`` (so only the last binding survives at class-creation time and
    unittest discovers none of them), the two mixin bases are undefined
    ``SCREAMING_SNAKE_CASE__`` placeholders (presumably ModelTesterMixin and
    PipelineTesterMixin — confirm upstream), and locals are bound to a throwaway
    ``lowercase`` name that later lines do not read.  Comments describe intent.
    """
    # All model classes under test (empty when torch is not installed).
    lowercase_ : List[str] =(
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-tag -> model-class mapping consumed by the pipeline tester mixin.
    lowercase_ : Optional[int] =(
        {
            '''feature-extraction''': NystromformerModel,
            '''fill-mask''': NystromformerForMaskedLM,
            '''question-answering''': NystromformerForQuestionAnswering,
            '''text-classification''': NystromformerForSequenceClassification,
            '''token-classification''': NystromformerForTokenClassification,
            '''zero-shot''': NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowercase_ : Any =False
    lowercase_ : Dict =False
    # setUp: builds the model tester and the config tester (hidden_size=37);
    # results are lost to the scrambled ``lowercase`` binding.
    def A__ ( self):
        lowercase = NystromformerModelTester(self)
        lowercase = ConfigTester(self ,config_class=A__ ,hidden_size=3_7)
    # Shared configuration sanity checks.
    def A__ ( self):
        self.config_tester.run_common_tests()
    # Base-model forward check.
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__)
    # Same check across the supported position-embedding types.
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowercase = type
            self.model_tester.create_and_check_model(*A__)
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A__)
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A__)
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A__)
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A__)
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A__)
    # Smoke-test loading the first published checkpoint.
    @slow
    def A__ ( self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase = NystromformerModel.from_pretrained(A__)
            self.assertIsNotNone(A__)
@require_torch
class lowercase ( unittest.TestCase ):
    """Slow integration tests against the ``uw-madison/nystromformer-512`` checkpoint.

    NOTE(review): locals were scrambled to ``lowercase``; later lines read names
    (``model``, ``output``, ``tokenizer``, ``encoding``, ``token_logits``) that
    are never bound — the original variable names need restoring to run.
    """
    # Forward a 6-token dummy batch; compare a logits slice to reference values.
    @slow
    def A__ ( self):
        lowercase = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''')
        lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            lowercase = model(A__)[0]
        lowercase = torch.Size((1, 6, 7_6_8))
        self.assertEqual(output.shape ,A__)
        lowercase = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] ,A__ ,atol=1E-4))
    # Masked-LM check: the [MASK] token (position 2) should decode to "capital".
    @slow
    def A__ ( self):
        lowercase = '''the [MASK] of Belgium is Brussels'''
        lowercase = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''')
        lowercase = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''')
        lowercase = tokenizer(A__ ,return_tensors='''pt''')
        with torch.no_grad():
            lowercase = model(encoding.input_ids).logits
        lowercase = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(A__) ,'''capital''')
| 714 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( key , default=False ):
    """Read a boolean flag from environment variable *key*.

    Returns *default* when the variable is unset; otherwise interprets the
    value like ``distutils.util.strtobool`` (1 for y/yes/t/true/on/1, 0 for
    n/no/f/false/off/0, case-insensitive).

    Raises:
        ValueError: if the variable is set to a non-boolean string.

    Fixes: original parameters were scrambled placeholders while the body read
    undefined ``key``/``default``/``_value``; also replaces the deprecated
    (removed in Python 3.12) ``distutils.util.strtobool`` with an exact inline
    equivalent.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        lowered = value.lower()
        if lowered in ("y", "yes", "t", "true", "on", "1"):
            _value = 1
        elif lowered in ("n", "no", "f", "false", "off", "0"):
            _value = 0
        else:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.' )
    return _value
# Module-wide flag gating @slow tests, driven by the RUN_SLOW env var.
# NOTE(review): `parse_flag_from_env` is not defined under that name in this
# scrambled file (the helper above was renamed `UpperCamelCase`), and the
# `Dict` annotation is wrong for a bool/int flag — reconcile names upstream.
lowercase__ :Dict = parse_flag_from_env("RUN_SLOW", default=False)
def UpperCamelCase ( lowerCAmelCase__ ):
    """Unconditionally skip the decorated test (or test class)."""
    decorator = unittest.skip('''Test was skipped''' )
    return decorator(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Run the decorated test only when the module-level RUN_SLOW flag is set."""
    decorator = unittest.skipUnless(_run_slow_tests , '''test is slow''' )
    return decorator(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Run the decorated test only on CPU-only hosts (skip when CUDA exists)."""
    cpu_only = not torch.cuda.is_available()
    return unittest.skipUnless(cpu_only , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless a CUDA GPU is available."""
    has_gpu = torch.cuda.is_available()
    return unittest.skipUnless(has_gpu , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless an XPU device is available."""
    decorator = unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )
    return decorator(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless torch ships the `mps` backend."""
    decorator = unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )
    return decorator(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless both transformers and datasets import."""
    suite_ready = is_transformers_available() and is_datasets_available()
    return unittest.skipUnless(suite_ready , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless bitsandbytes is importable."""
    decorator = unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )
    return decorator(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless a TPU is available."""
    decorator = unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )
    return decorator(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless exactly one CUDA GPU is present."""
    exactly_one_gpu = torch.cuda.device_count() == 1
    return unittest.skipUnless(exactly_one_gpu , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless exactly one XPU is present."""
    exactly_one_xpu = torch.xpu.device_count() == 1
    return unittest.skipUnless(exactly_one_xpu , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless more than one CUDA GPU is present."""
    multi_gpu = torch.cuda.device_count() > 1
    return unittest.skipUnless(multi_gpu , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless more than one XPU is present."""
    multi_xpu = torch.xpu.device_count() > 1
    return unittest.skipUnless(multi_xpu , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless safetensors is importable."""
    decorator = unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )
    return decorator(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless DeepSpeed is importable."""
    decorator = unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )
    return decorator(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless torch >= 1.12.0."""
    new_enough = is_torch_version('''>=''' , '''1.12.0''' )
    return unittest.skipUnless(new_enough , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def UpperCamelCase ( test_case=None , version=None ):
    """Skip *test_case* unless torch >= *version*.

    Usable either as ``@decorator`` or parameterized as
    ``@decorator(version="1.13")`` — when called without a test case it
    returns a partially-applied version of itself.

    Fixes: the original declared two duplicate placeholder parameters (a
    SyntaxError) and read undefined ``test_case``/``version``.
    """
    if test_case is None:
        return partial(UpperCamelCase , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f'test requires torch version >= {version}' )(test_case )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless Tensorboard is importable."""
    decorator = unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )
    return decorator(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless wandb is importable."""
    decorator = unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )
    return decorator(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip the decorated test unless comet_ml is importable."""
    decorator = unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )
    return decorator(lowerCAmelCase__ )
# True when at least one tracker backend (wandb or tensorboard) is importable
# and comet_ml is NOT installed.  NOTE(review): the scrambler lost the original
# name (the decorator below reads `_atleast_one_tracker_available`), and the
# `Dict` annotation is wrong for a bool.
lowercase__ :Dict = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( lowerCAmelCase__ ):
    """Skip unless at least one tracker is installed and comet_ml is not."""
    decorator = unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )
    return decorator(lowerCAmelCase__ )
class lowercase ( unittest.TestCase ):
    """TestCase backed by one class-level temporary directory, emptied per test.

    Fixes: the scrambler had bound ``tempfile.mkdtemp()`` and the toggle flag
    to throwaway locals while the rest of the class read ``cls.tmpdir`` /
    ``self.clear_on_setup``, and named all lifecycle hooks ``A__`` so unittest
    never invoked them; the standard hook names and bindings are restored.
    """

    # When True (the default), everything inside tmpdir is deleted before each test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create the shared temporary directory once for the whole class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove the shared temporary directory after all tests have run."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Empty tmpdir (files and subdirectories) so each test starts clean."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('''**/*'''):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class lowercase ( unittest.TestCase ):
    """TestCase that resets accelerate's global singletons after every test.

    Fix: the hook was named ``A__`` so unittest never called it; restored to
    the standard ``tearDown`` name (its body already called
    ``super().tearDown()``, confirming the intent).
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowercase ( unittest.TestCase ):
    """TestCase helper that starts mock patchers and stops them on cleanup."""

    def A__ ( self ,mocks):
        """Start each patcher in *mocks* (one object or a tuple/list of them)
        and register a cleanup that stops it when the test finishes.

        Fix: the parameter was a scrambled placeholder while the body read
        undefined ``mocks``; the results are now stored on ``self.mocks`` as
        the loop below expects.
        """
        self.mocks = mocks if isinstance(mocks ,(tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def UpperCamelCase ( tensor ):
    """Return True when *tensor* holds identical values on every process.

    Gathers a copy of the tensor from all processes and compares each gathered
    slice to the local one.  (Names restored — the original bound everything to
    a throwaway local and read undefined ``tensor``/``state``/``tensors``.)
    """
    state = AcceleratorState()
    local = tensor[None].clone().to(state.device )
    tensors = gather(local ).cpu()
    local = local[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , local ):
            return False
    return True
class lowercase :
    """Record of a finished subprocess: return code plus captured output lines.

    Fix: the original ``__init__`` bound all three arguments to a throwaway
    local, leaving the instance without any attributes.
    """

    def __init__( self ,returncode ,stdout ,stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def UpperCamelCase ( stream , callback ):
    """Forward each line read from *stream* to *callback* until EOF.

    Fix: the original declared two duplicate placeholder parameters (a
    SyntaxError) while the body read undefined ``stream``/``line``.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def UpperCamelCase ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    """Run *cmd* as a subprocess, tee-ing stdout/stderr lines to the console
    while collecting them, and return a run-output record.

    Fixes: the original declared duplicate placeholder parameters (a
    SyntaxError) and read unbound locals (``cmd``/``p``/``out``/``err``/...).
    NOTE(review): calls module helpers ``_read_stream`` and ``_RunOutput``,
    which exist in this scrambled file only under renamed identifiers —
    reconcile the module-level names.
    """
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        # Decode, strip, store, and (unless quiet) echo with a stream label.
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def UpperCamelCase ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    """Synchronously run *cmd* via the async streaming helper; raise
    RuntimeError (with the collected stderr) on a non-zero exit code.

    Fixes: the original declared duplicate placeholder parameters (a
    SyntaxError) and read unbound ``loop``/``result``/``cmd_str``/``stderr``.
    NOTE(review): ``_stream_subprocess`` exists in this scrambled file only
    under a renamed identifier — reconcile the module-level names.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}' )
    return result
class lowercase ( Exception ):
    """Raised when a subprocess run by the helper below exits non-zero.

    Fix: the base class was an undefined ``SCREAMING_SNAKE_CASE__``
    placeholder (a NameError at class creation); a plain Exception base
    matches how the class is raised and caught.
    """
    pass
def UpperCamelCase ( command , return_stdout=False ):
    """Run *command* with ``subprocess.check_output``; optionally return the
    decoded stdout.  Wraps failures in the subprocess-call exception with the
    captured output attached.

    Fixes: the original declared duplicate placeholder parameters (a
    SyntaxError) and read unbound ``return_stdout``/``output``.
    NOTE(review): ``SubprocessCallException`` exists in this scrambled file
    only under a renamed identifier — reconcile the module-level names.
    """
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 633 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCamelCase ( model , ckpt_dir , model_name ):
    """Export a PyTorch BERT *model*'s weights as a TF1 checkpoint in *ckpt_dir*.

    Translates state-dict keys to the original TF variable naming scheme,
    transposing the weight matrices TF stores transposed.

    Fixes: the original declared three duplicate placeholder parameters (a
    SyntaxError) and read unbound locals throughout; names restored from the
    standard HF conversion script.
    """
    # Keys whose tensors are stored transposed on the TF side.
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    # Ordered (pattern, replacement) pairs mapping PyTorch names to TF names.
    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name ):
        # Apply every rename rule in order, then prefix with the model scope.
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f'bert/{name}'
    def create_tf_var(tensor , name , session ):
        # Declare a zero-initialized TF variable matching the tensor's dtype/shape.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_tensor = session.run(tf_var )
            # Round-trip check: the TF variable should now equal the torch tensor.
            print(f'Successfully created {tf_name}: {np.allclose(tf_tensor , torch_tensor )}' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def UpperCamelCase ( raw_args=None ):
    """CLI entry point: parse arguments, load the PyTorch BERT checkpoint and
    convert it to a TF checkpoint.

    Fixes: the original bound ``parser``/``args``/``model`` to a throwaway
    local and then read the unbound names; defaults for ``type``/``required``
    restored from the standard HF conversion script.
    NOTE(review): ``convert_pytorch_checkpoint_to_tf`` exists in this
    scrambled file only under a renamed identifier — reconcile names.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''' )
    parser.add_argument(
        '''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''' )
    parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''' )
    parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''' )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
    # Fix: the guard previously called an undefined `main()`.
    UpperCamelCase()
| 715 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Seq2seq trainer variant with generation-aware evaluate() and predict().

    NOTE(review): the base class is an undefined ``SCREAMING_SNAKE_CASE__``
    placeholder (the imports suggest SeqaSeqTrainer), ``__init__`` declares
    duplicate ``A__`` parameters (a SyntaxError), and every local was collapsed
    to a throwaway ``lowercase`` binding while later lines read the intended
    names (``gen_kwargs``, ``eval_dataset``, ``output``, ``metrics``, ...).
    The original variable names must be restored before this can run; the
    comments below describe intent only.
    """
    # Presumably stores eval_examples and post_process_function for later use.
    def __init__( self ,*A__ ,A__=None ,A__=None ,**A__):
        super().__init__(*A__ ,**A__)
        lowercase = eval_examples
        lowercase = post_process_function
    # evaluate(): run the (generation-aware) evaluation loop, post-process
    # predictions, compute prefixed metrics and fire the on_evaluate callback.
    def A__ ( self ,A__ = None ,A__=None ,A__ = None ,A__ = "eval" ,**A__ ,):
        lowercase = gen_kwargs.copy()
        # Fall back to the trainer args for max_length / num_beams when the
        # caller did not pass them explicitly.
        lowercase = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
        )
        lowercase = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
        )
        lowercase = gen_kwargs
        lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
        lowercase = self.get_eval_dataloader(A__)
        lowercase = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        lowercase = self.compute_metrics
        lowercase = None
        lowercase = time.time()
        lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowercase = eval_loop(
                A__ ,description='''Evaluation''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
        finally:
            # Always restore the metric function, even if the loop raised.
            lowercase = compute_metrics
        lowercase = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            # Exclude JIT compilation time from the throughput measurement.
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            lowercase = self.post_process_function(A__ ,A__ ,A__)
            lowercase = self.compute_metrics(A__)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    lowercase = metrics.pop(A__)
            metrics.update(output.metrics)
        else:
            lowercase = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(A__)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        lowercase = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,A__)
        return metrics
    # predict(): same pattern as evaluate() but over a test dataset, returning
    # a PredictionOutput with post-processed predictions and prefixed metrics.
    def A__ ( self ,A__ ,A__ ,A__=None ,A__ = "test" ,**A__):
        lowercase = gen_kwargs.copy()
        lowercase = self.get_test_dataloader(A__)
        # Temporarily disable metric computation, we will do it in the loop here.
        lowercase = self.compute_metrics
        lowercase = None
        lowercase = time.time()
        lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowercase = eval_loop(
                A__ ,description='''Prediction''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
        finally:
            lowercase = compute_metrics
        lowercase = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        lowercase = self.post_process_function(A__ ,A__ ,A__ ,'''predict''')
        lowercase = self.compute_metrics(A__)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                lowercase = metrics.pop(A__)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=A__)
| 633 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
    """Synthetic config/input builder plus per-head checks for TFRegNet tests.

    NOTE(review): scrambled source — ``__init__`` declares duplicate ``A__``
    parameters (a SyntaxError) and every local/attribute assignment targets a
    throwaway ``lowercase`` name, while later lines read the intended
    attributes (``self.batch_size``, ``self.hidden_sizes``, ...).  Comments
    describe intent only.
    """
    # Presumably stores: parent, batch_size=3, image_size=32, num_channels=3,
    # embeddings_size=10, hidden_sizes, depths, is_training, use_labels,
    # hidden_act, num_labels, scope, num_stages=len(depths).
    def __init__( self ,A__ ,A__=3 ,A__=3_2 ,A__=3 ,A__=1_0 ,A__=[1_0, 2_0, 3_0, 4_0] ,A__=[1, 1, 2, 1] ,A__=True ,A__=True ,A__="relu" ,A__=3 ,A__=None ,):
        lowercase = parent
        lowercase = batch_size
        lowercase = image_size
        lowercase = num_channels
        lowercase = embeddings_size
        lowercase = hidden_sizes
        lowercase = depths
        lowercase = is_training
        lowercase = use_labels
        lowercase = hidden_act
        lowercase = num_labels
        lowercase = scope
        lowercase = len(A__)
    # Build random pixel_values (and labels when use_labels) plus a config.
    def A__ ( self):
        lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        lowercase = None
        if self.use_labels:
            lowercase = ids_tensor([self.batch_size] ,self.num_labels)
        lowercase = self.get_config()
        return config, pixel_values, labels
    # Construct the RegNetConfig from the tester's hyper-parameters.
    def A__ ( self):
        return RegNetConfig(
            num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,)
    # Base-model check: last hidden state is (B, C, H//32, W//32).
    def A__ ( self ,A__ ,A__ ,A__):
        lowercase = TFRegNetModel(config=A__)
        lowercase = model(A__ ,training=A__)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) ,)
    # Image-classification head check: logits are (B, num_labels).
    def A__ ( self ,A__ ,A__ ,A__):
        lowercase = self.num_labels
        lowercase = TFRegNetForImageClassification(A__)
        lowercase = model(A__ ,labels=A__ ,training=A__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
    # Split prepare_config_and_inputs() into (config, inputs_dict) for mixins.
    def A__ ( self):
        lowercase = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase = config_and_inputs
        lowercase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """TFRegNet test suite: model/classification checks, hidden-state shape
    checks, and tuple-vs-dict output equivalence.

    NOTE(review): scrambled source — the mixin bases are undefined
    placeholders (presumably TFModelTesterMixin / PipelineTesterMixin), all
    methods share the name ``A__`` (only the last binding survives; unittest
    discovers none), and locals are bound to throwaway ``lowercase`` names
    that later lines do not read.  Comments describe intent only.
    """
    lowercase_ : Any =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    lowercase_ : Optional[int] =(
        {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    lowercase_ : Any =False
    lowercase_ : Optional[Any] =False
    lowercase_ : Union[str, Any] =False
    lowercase_ : Tuple =False
    lowercase_ : Any =False
    # setUp: build the model tester and config tester.
    def A__ ( self):
        lowercase = TFRegNetModelTester(self)
        lowercase = ConfigTester(self ,config_class=A__ ,has_text_modality=A__)
    def A__ ( self):
        return
    # RegNet is a vision model with no token embeddings.
    @unittest.skip(reason='''RegNet does not use inputs_embeds''')
    def A__ ( self):
        pass
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0 ,reason='''TF does not support backprop for grouped convolutions on CPU.''' ,)
    @slow
    def A__ ( self):
        super().test_keras_fit()
    @unittest.skip(reason='''RegNet does not support input and output embeddings''')
    def A__ ( self):
        pass
    # Forward signature should start with `pixel_values`.
    def A__ ( self):
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase = model_class(A__)
            lowercase = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase = [*signature.parameters.keys()]
            lowercase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,A__)
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__)
    # Hidden-state count/shape check across both layer types, with
    # output_hidden_states toggled via kwargs and via config.
    def A__ ( self):
        def check_hidden_states_output(A__ ,A__ ,A__):
            lowercase = model_class(A__)
            lowercase = model(**self._prepare_for_class(A__ ,A__) ,training=A__)
            lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase = self.model_tester.num_stages
            self.assertEqual(len(A__) ,expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) ,[self.model_tester.image_size // 2, self.model_tester.image_size // 2] ,)
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase = layer_type
                lowercase = True
                check_hidden_states_output(A__ ,A__ ,A__)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase = True
                check_hidden_states_output(A__ ,A__ ,A__)
    # Tuple output and dict output must agree element-wise, with and without
    # labels and with output_hidden_states on.
    def A__ ( self):
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        def check_equivalence(A__ ,A__ ,A__ ,A__={}):
            lowercase = model(A__ ,return_dict=A__ ,**A__)
            lowercase = model(A__ ,return_dict=A__ ,**A__).to_tuple()
            def recursive_check(A__ ,A__):
                if isinstance(A__ ,(List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(A__ ,A__):
                        recursive_check(A__ ,A__)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(A__ ,A__)) ,msg=(
                            '''Tuple and dict output are not equal. Difference:'''
                            f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}'
                        ) ,)
            recursive_check(A__ ,A__)
        for model_class in self.all_model_classes:
            lowercase = model_class(A__)
            lowercase = self._prepare_for_class(A__ ,A__)
            lowercase = self._prepare_for_class(A__ ,A__)
            check_equivalence(A__ ,A__ ,A__)
            lowercase = self._prepare_for_class(A__ ,A__ ,return_labels=A__)
            lowercase = self._prepare_for_class(A__ ,A__ ,return_labels=A__)
            check_equivalence(A__ ,A__ ,A__)
            lowercase = self._prepare_for_class(A__ ,A__)
            lowercase = self._prepare_for_class(A__ ,A__)
            check_equivalence(A__ ,A__ ,A__ ,{'''output_hidden_states''': True})
            lowercase = self._prepare_for_class(A__ ,A__ ,return_labels=A__)
            lowercase = self._prepare_for_class(A__ ,A__ ,return_labels=A__)
            check_equivalence(A__ ,A__ ,A__ ,{'''output_hidden_states''': True})
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__)
    # Smoke-test loading the first published checkpoint.
    @slow
    def A__ ( self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase = TFRegNetModel.from_pretrained(A__)
            self.assertIsNotNone(A__)
def UpperCamelCase ( ):
    """Load the standard COCO cats fixture image used by the integration tests.

    Fix: the original bound the opened image to a throwaway local and then
    returned the unbound name ``image``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
    """Slow integration test for TFRegNet image classification.

    NOTE(review): scrambled locals — later lines read names (``outputs``)
    that are bound to a throwaway ``lowercase``; restore the originals to run.
    """
    @cached_property
    def A__ ( self):
        # default_image_processor: processor for the first published checkpoint.
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    # Classify the fixture image and compare the first three logits to
    # reference values.
    @slow
    def A__ ( self):
        lowercase = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        lowercase = self.default_image_processor
        lowercase = prepare_img()
        lowercase = image_processor(images=A__ ,return_tensors='''tf''')
        # forward pass
        lowercase = model(**A__ ,training=A__)
        # verify the logits
        lowercase = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape ,A__)
        lowercase = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3] ,A__ ,atol=1E-4)
| 716 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
# Help text for the `accelerate tpu-config` sub-command.  NOTE(review): the
# scrambler lost the original name (the parser builder below reads
# `_description`), and the `Any` annotation should be `str`.
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( subparsers=None ):
    """Build the `accelerate tpu-config` argument parser.

    When *subparsers* is given, registers `tpu-config` as a sub-command and
    sets its launch function as the default handler; otherwise returns a
    standalone parser.

    Fixes: the original bound ``parser``/``config_args``/``pod_args`` to a
    throwaway local and read the unbound names; ``type``/``default`` values
    restored from the upstream accelerate CLI.  The description string is
    inlined because the module-level ``_description`` constant lost its name
    to the scrambler.
    NOTE(review): ``tpu_command_launcher`` exists in this scrambled file only
    under a renamed identifier — reconcile the module-level names.
    """
    description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def UpperCamelCase ( args ):
    """Assemble and run the `gcloud ... tpu-vm ssh` command described by *args*.

    Merges CLI arguments with the saved accelerate config file, builds the
    startup command string (optionally installing accelerate first), and
    either prints it (``--debug``) or executes it via subprocess.

    Fixes: the original bound ``defaults``/``new_cmd``/``cmd`` and the updated
    ``args`` fields to a throwaway local and read the unbound names;
    reconstructed from the upstream accelerate CLI.
    NOTE(review): ``default_config_file``/``load_config_from_file``/``parse``/
    ``Version`` come from the module imports above.
    """
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'Running {" ".join(cmd )}' )
        return
    subprocess.run(cmd )
    print('''Successfully setup pod.''' )
def UpperCamelCase ( ):
    """Script entry point: build the TPU argument parser, parse CLI args and launch.

    Fixes obfuscation damage: the original bound both the parser and the parsed
    args to a single clobbered name and then referenced the undefined names
    ``parser`` and ``lowerCAmelCase__``, raising NameError at runtime.
    """
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 633 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowercase__ :int = "\\n\n"
lowercase__ :Optional[int] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
lowercase__ :List[str] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    # Perplexity metric: exponentiated average negative log-likelihood under a
    # causal language model.
    # NOTE(review): obfuscation gave both methods the same name ``A__`` — the
    # second definition shadows the first, so the metric-info method is lost.
    # They presumably correspond to ``datasets.Metric._info`` / ``_compute``;
    # confirm against the original metric script.
    def A__ ( self):
        # Declare the metric schema: a single string column of input texts.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    '''input_texts''': datasets.Value('''string'''),
                }) ,reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] ,)
    def A__ ( self ,A__ ,A__ ,A__ = 1_6 ,A__ = True ,A__=None):
        """Compute per-text perplexities and their mean.

        NOTE(review): parameter names were collapsed to ``A__`` while the body
        reads ``device`` / ``batch_size`` / ``add_start_token`` etc., and every
        local is bound to ``lowercase`` — as written this raises NameError.
        Comments describe the apparent intent.
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                lowercase = '''cuda'''
        else:
            lowercase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        lowercase = AutoModelForCausalLM.from_pretrained(A__)
        lowercase = model.to(A__)
        lowercase = AutoTokenizer.from_pretrained(A__)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            lowercase = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(A__) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            lowercase = model.config.max_length - 1
        else:
            lowercase = model.config.max_length
        # Tokenize the whole corpus at once, padded and truncated to max length.
        lowercase = tokenizer(
            A__ ,add_special_tokens=A__ ,padding=A__ ,truncation=A__ ,max_length=A__ ,return_tensors='''pt''' ,return_attention_mask=A__ ,).to(A__)
        lowercase = encodings['''input_ids''']
        lowercase = encodings['''attention_mask''']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1) ,1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1) ,2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        lowercase = []
        lowercase = CrossEntropyLoss(reduction='''none''')
        # Iterate over the corpus in mini-batches.
        for start_index in logging.tqdm(range(0 ,len(A__) ,A__)):
            lowercase = min(start_index + batch_size ,len(A__))
            lowercase = encoded_texts[start_index:end_index]
            lowercase = attn_masks[start_index:end_index]
            if add_start_token:
                # Prepend a BOS token to every row of the batch.
                lowercase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(A__)
                lowercase = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1)
                # NOTE(review): ``torch.intaa`` below is not a torch attribute —
                # presumably mangled from ``torch.int64`` — and raises
                # AttributeError at runtime; confirm and fix.
                lowercase = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa).to(A__), attn_mask] ,dim=1)
            lowercase = encoded_batch
            with torch.no_grad():
                lowercase = model(A__ ,attention_mask=A__).logits
            # Shift so that tokens < n predict token n.
            lowercase = out_logits[..., :-1, :].contiguous()
            lowercase = labels[..., 1:].contiguous()
            lowercase = attn_mask[..., 1:].contiguous()
            # NOTE(review): ``torch.expa`` is likewise not a torch attribute —
            # presumably mangled from ``torch.exp`` — confirm and fix.
            lowercase = torch.expa(
                (loss_fct(shift_logits.transpose(1 ,2) ,A__) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(A__)}
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Regex used to split code into identifier-like tokens (anything that is not
# a letter, digit or underscore is a separator).
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10  # files with fewer tokens are skipped by get_min_hash
NUM_PERM = 256  # number of MinHash permutations
def get_min_hash(tokens):
    """Compute the MinHash signature of a list of tokens.

    Returns ``None`` when the input has fewer than ``MIN_NUM_TOKENS`` tokens
    (too small to deduplicate reliably).

    Fixes obfuscation damage: the original passed the token iterable itself as
    ``num_perm`` instead of the ``NUM_PERM`` constant, bound the hash object to
    a clobbered local, and lost its real name (callers use ``get_min_hash``).
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Return the set of non-empty alphanumeric/underscore tokens in *code*.

    Renamed back to ``get_tokens`` — the similarity helpers below call it by
    that name, which obfuscation had replaced with ``UpperCamelCase``.
    """
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """MinHash-LSH index that groups near-duplicate files into clusters.

    Restores the class name and method names that the rest of the file uses
    (``DuplicationIndex(...)``, ``di.add``, ``get_duplicate_clusters``); in the
    obfuscated original all three methods were named ``A__`` so only the last
    one survived, and the cluster map was built with an undefined factory.
    """

    def __init__(self, *, duplication_jaccard_threshold=0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # cluster base key -> set of duplicate keys attached to that base
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash):
        """Insert *code_key*; attach it to an existing cluster when the LSH
        query finds close duplicates, otherwise start a new cluster."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # None of the close duplicates is a cluster base yet.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        """Return clusters as lists of ``{'base_index', 'repo_name', 'path'}`` dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath):
        """Dump the duplicate clusters to *filepath* as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, 'w') as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Map one ``(index, row)`` pair to ``((index, repo_name, path), min_hash)``.

    Returns ``None`` implicitly when the file is too short for a MinHash.
    Fixes obfuscation damage: the original unpacked the tuple into a single
    clobbered name and then read the undefined names ``index``/``data``.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield ``(key, MinHash)`` pairs for every dataset row, computed in a
    process pool; rows too short to hash are skipped."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Build duplicate clusters for a dataset.

    Fixes obfuscation damage: the original declared two parameters with the
    same name (a SyntaxError) and bound the index to a clobbered local while
    reading the undefined name ``di``.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2):
    """Jaccard similarity of the token sets of two code strings.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) and
    both token sets bound to the same local, so the original always compared a
    set with itself.
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
lowercase__ :List[Any] = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one duplicate cluster to its "extremes".

    Keeps an element only when it is not within *jaccard_threshold* similarity
    of an already-kept extreme; otherwise the matching extreme's ``copies``
    counter is incremented. File contents are read from the module-global
    ``_shared_dataset`` (set by ``find_extremes`` so worker processes inherit
    it instead of pickling it per task).

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) and
    both loop elements collapsed to one name.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run ``_find_cluster_extremes_shared`` over every cluster in parallel.

    The dataset is published through the module-global ``_shared_dataset`` so
    forked workers can read it without per-task pickling.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) and
    collapsed local bindings.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def UpperCamelCase(dataset, jaccard_threshold=0.85):
    """Deduplicate *dataset*: drop near-duplicate files, keeping one "extreme"
    representative per duplicate cluster.

    Returns ``(filtered_dataset, duplicate_clusters)``; cluster elements are
    annotated in place with ``is_extreme`` and ``copies``.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) and
    every intermediate bound to a single clobbered local.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element['copies'] = extreme_dict[element['base_index']]['copies']
    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')
    return ds_filter, duplicate_clusters
| 633 | 0 |
from __future__ import annotations
lowercase__ :Optional[Any] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    """Breadth-first-search shortest-path helper over an adjacency-list graph.

    Restores the class and method names used by the demo code below
    (``Graph(graph, "G")``, ``g.breath_first_search()``, ``g.shortest_path``)
    and the local names the original bodies read but never defined.
    """

    def __init__(self, graph, source_vertex):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        """Run BFS from the source vertex, filling ``self.parent``."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex):
        """Return the BFS path "src->...->target"; raises ValueError when unreachable."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f'->{target_vertex}'
if __name__ == "__main__":
lowercase__ :Tuple = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 718 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    # Safety checker for generated images: flags probable NSFW and probable
    # watermarked images and replaces each flagged image with a black image.
    # NOTE(review): the class name and base class were mangled by obfuscation;
    # presumably the base is ``PreTrainedModel`` (imported above) — confirm.
    lowercase_ : Union[str, Any] =CLIPConfig
    lowercase_ : str =['''CLIPEncoderLayer''']
    def __init__( self ,A__):
        super().__init__(A__)
        # NOTE(review): the three modules below were presumably assigned to
        # ``self.vision_model`` / ``self.p_head`` / ``self.w_head`` (the
        # forward pass reads those attributes), but obfuscation collapsed the
        # assignment targets to a plain local — as written this discards them.
        lowercase = CLIPVisionModelWithProjection(config.vision_config)
        lowercase = nn.Linear(config.vision_config.projection_dim ,1)
        lowercase = nn.Linear(config.vision_config.projection_dim ,1)
    @torch.no_grad()
    def A__ ( self ,A__ ,A__ ,A__=0.5 ,A__=0.5):
        # Score embeddings with the NSFW head, threshold at p_threshold, black
        # out flagged images; then repeat with the watermark head/w_threshold.
        # NOTE(review): locals (``nsfw_detected``, ``watermark_detected``, …)
        # are all bound to ``lowercase`` while later lines read the real names —
        # as written this raises NameError; restore distinct names to fix.
        lowercase = self.vision_model(A__)[0]
        lowercase = self.p_head(A__)
        lowercase = nsfw_detected.flatten()
        lowercase = nsfw_detected > p_threshold
        lowercase = nsfw_detected.tolist()
        if any(A__):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
            for idx, nsfw_detected_ in enumerate(A__):
                if nsfw_detected_:
                    lowercase = np.zeros(images[idx].shape)
        lowercase = self.w_head(A__)
        lowercase = watermark_detected.flatten()
        lowercase = watermark_detected > w_threshold
        lowercase = watermark_detected.tolist()
        if any(A__):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
            for idx, watermark_detected_ in enumerate(A__):
                if watermark_detected_:
                    lowercase = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
| 633 | 0 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Return the relative-position-bias table of layer *i* (not transposed).

    Fixes obfuscation damage: three identical parameter names (a SyntaxError);
    the name is restored to the one the conversion code below calls.
    """
    return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return ``(k, o, q, v)`` projection matrices of attention block *i*,
    each sliced out of the stacked T5X kernels and reshaped to 2-D.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) and
    the per-matrix temporaries collapsed to a single clobbered local.
    """
    k_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the ``(wi, wo)`` MLP weights of block *i*.

    ``wi`` is a ``(wi_0, wi_1)`` tuple when the checkpoint uses a gated MLP
    (``split_mlp_wi``), otherwise a single matrix.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) and
    collapsed local bindings.
    """
    if split_mlp_wi:
        wi_0 = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the LayerNorm scale vector of block *i*.

    Fixes obfuscation damage: four identical parameter names (a SyntaxError).
    """
    return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Convert the flax-style T5X parameter tree into an HF T5/UMT5 key->array dict.

    Fixes obfuscation damage: duplicated keyword-only parameter names (a
    SyntaxError) and every keyed write into the output dict collapsed to a
    single clobbered local, so the original built nothing. Keyword names are
    restored from the caller (``num_layers=``, ``is_encoder_only=``,
    ``scalable_attention=``); the HF key layout follows the standard
    T5X -> transformers mapping.
    """
    old = traverse_util.flatten_dict(variables['target'])
    old = {'/'.join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/encoder/mlp/wi_0/kernel' in old
    print('Split MLP:', split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, 'encoder', 'pre_attention_layer_norm')
        k, o, q, v = tax_attention_lookup(old, i, 'encoder', 'attention')
        new[f'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[f'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, 'encoder', 'pre_mlp_layer_norm')
        wi, wo = tax_mlp_lookup(old, i, 'encoder', split_mlp_wi)
        new[f'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[f'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f'encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
                old, i, 'encoder' ).T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']
    if not scalable_attention:
        # Non-scalable checkpoints share a single relative bias on block 0.
        new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
            old, 0, 'encoder' ).T
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
            old, 0, 'decoder' ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_self_attention_layer_norm')
            k, o, q, v = tax_attention_lookup(old, i, 'decoder', 'self_attention')
            new[f'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_cross_attention_layer_norm')
            k, o, q, v = tax_attention_lookup(old, i, 'decoder', 'encoder_decoder_attention')
            new[f'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_mlp_layer_norm')
            wi, wo = tax_mlp_lookup(old, i, 'decoder', split_mlp_wi)
            new[f'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[f'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f'decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
                    old, i, 'decoder' ).T
        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if 'decoder/logits_dense/kernel' in old:
            new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Turn the numpy parameter dict into a torch state dict, adding tied weights.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) and
    keyed writes into the state dict collapsed to a clobbered local.
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if 'encoder.embed_tokens.weight' not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']
    if not is_encoder_only:
        if 'decoder.embed_tokens.weight' not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']
        if 'lm_head.weight' not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            state_dict['lm_head.weight'] = state_dict['shared.weight']
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Load a T5X checkpoint into *model* in place (strict state-dict load).

    Fixes obfuscation damage: five identical parameter names (a SyntaxError)
    and collapsed locals; the name is restored to the one the converter calls.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Build an MT5/UMT5 model from *config_file*, load the T5X weights into it
    and save the PyTorch checkpoint to *pytorch_dump_path*.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) and
    collapsed locals; name restored to the one the ``__main__`` block calls.
    """
    config = MTaConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    # argparse stores "--t5x_checkpoint_path" under ``t5x_checkpoint_path``;
    # the original read the non-existent attribute ``tax_checkpoint_path``.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 719 |
class Node:
    """Binary-search-tree node holding a value and optional children.

    Restores the class name (the tree builder below calls ``Node(...)``) and
    the ``insert`` method name (the body recurses via ``self.left.insert``),
    plus the parameter the body actually reads.
    """

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert *val* below this node keeping BST order; equal values are dropped.

        NOTE(review): the ``if self.val:`` guard treats a falsy root value
        (0, "") as an empty node — kept as-is to preserve original behavior.
        """
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    """Append the in-order traversal of *root* to list *res* (recursive).

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError);
    the recursive calls inside the body already use the name ``inorder``.
    """
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr):
    """Sort *arr* by inserting into a BST and reading it back in order.

    Note: duplicates collapse because ``Node.insert`` drops equal values.
    Fixes obfuscation damage: the body read the undefined names
    ``arr``/``root``/``res``; the name is restored to the one the demo calls.
    """
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 633 | 0 |
def solution(limit=1_000_000):
    """Sum Euler's totient phi(d) for d = 2..limit via a totient sieve
    (Project Euler 72-style count of reduced proper fractions).

    Fixes obfuscation damage: the inner sieve loop stepped by the (undefined)
    parameter name instead of the prime ``i``, and the parameter the body
    reads (``limit``) was renamed away.
    """
    # phi[i] starts at i - 1: correct for primes, adjusted below for composites.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime — its totient was left untouched
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
| 720 |
import os
def solution(filename="input.txt"):
    """Project Euler 82: minimal path sum crossing the matrix left-to-right,
    moving right, up or down, starting in any row of the first column.

    Fixes obfuscation damage: the body read the undefined names
    ``matrix``/``rows``/``cols``/``minimal_path_sums``; the name is restored
    to the one the ``__main__`` block calls. The input path is resolved
    relative to this file (a relative *filename* joins with the module dir;
    an absolute one is used as-is).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(',')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # First pass: enter column j from the left.
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # Second pass: allow moving down within column j.
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        # Third pass: allow moving up within column j.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 0 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    """Leftmost insertion index for *item* in *sorted_collection* (pure Python).

    A negative *hi* means "end of the collection". Fixes obfuscation damage:
    duplicated parameter names (a SyntaxError) and collapsed locals; name
    restored to the one ``insort_left`` calls.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection, item, lo=0, hi=-1):
    """Rightmost insertion index for *item* in *sorted_collection* (pure Python).

    A negative *hi* means "end of the collection". Fixes obfuscation damage:
    duplicated parameter names (a SyntaxError) and collapsed locals; name
    restored to the one ``insort_right`` calls.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection, item, lo=0, hi=-1):
    """Insert *item* into *sorted_collection* in place, before any equal items.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError).
    """
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection, item, lo=0, hi=-1):
    """Insert *item* into *sorted_collection* in place, after any equal items.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError).
    """
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection, item):
    """Iterative binary search; return the index of *item* or ``None``.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) and
    collapsed locals; name restored to the one the demo below calls.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def UpperCamelCase(sorted_collection, item):
    """Binary search via the stdlib ``bisect`` module; return index or ``None``.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) and
    the index bound to a clobbered local.
    """
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection, item, left, right):
    """Recursive binary search on the inclusive range [left, right]; return
    the index of *item* or ``None``.

    Fixes obfuscation damage: four identical parameter names (a SyntaxError);
    the recursive calls in the body already use this name.
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
lowercase__ :Union[str, Any] = input("Enter numbers separated by comma:\n").strip()
lowercase__ :List[str] = sorted(int(item) for item in user_input.split(","))
lowercase__ :Union[str, Any] = int(input("Enter a single number to be found in the list:\n"))
lowercase__ :int = binary_search(collection, target)
if result is None:
print(F'{target} was not found in {collection}.')
else:
print(F'{target} was found at position {result} in {collection}.')
| 721 |
from __future__ import annotations
def UpperCamelCase(nums):
    """Return True when side lengths *nums* can form a polygon, i.e. the
    longest side is strictly shorter than the sum of the others.

    Raises ValueError for fewer than three meaningful sides or non-positive
    lengths. Fixes obfuscation damage: the body read the undefined names
    ``nums`` and ``copy_nums``.
    """
    if len(nums) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in nums):
        raise ValueError('All values must be greater than 0')
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 | 0 |
import sys
import turtle
def get_mid(point1, point2):
    """Midpoint of two 2-D points.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) that
    made the original average one point with itself; name restored to the one
    ``triangle`` calls.
    """
    return (point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth):
    """Draw a Sierpinski triangle of the given recursion *depth* with the
    module-level turtle pen ``my_pen``.

    Fixes obfuscation damage: four identical parameter names (a SyntaxError)
    and the vertex names the body read but never defined; name restored to
    the one the recursive calls and the ``__main__`` block use.
    """
    # Outline the current triangle.
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    # Recurse into the three corner sub-triangles.
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
lowercase__ :Dict = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
lowercase__ :Optional[int] = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 700 |
def and_gate(input_1, input_2):
    """Logical AND of two 0/1 inputs: returns 1 only when neither input is 0.

    Fixes obfuscation damage: duplicated parameter names (a SyntaxError) that
    made both operands the same value; name restored to the one the tests and
    demo below call.
    """
    return int((input_1, input_2).count(0) == 0)
def test_and_gate():
    """Exercise the full truth table of ``and_gate``.

    Name restored to ``test_and_gate`` — the ``__main__`` block below calls it
    by that name, which obfuscation had replaced with ``UpperCamelCase``.
    """
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 633 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    """Download class regularization images from the LAION kNN service into a
    local folder, recording captions, URLs and file paths.

    NOTE(review): obfuscation collapsed the three parameters to
    ``lowerCAmelCase__`` and all locals to ``lowercase`` while the body reads
    ``factor`` / ``num_class_images`` / ``client`` / ``count`` / ``total`` etc.
    — as written this raises NameError. Comments describe the apparent intent.
    """
    # Over-query by 1.5x so enough downloads succeed after failures.
    lowercase = 1.5
    lowercase = int(factor * num_class_images )
    lowercase = ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=lowerCAmelCase__ , aesthetic_weight=0.1 )
    os.makedirs(f'{class_data_dir}/images' , exist_ok=lowerCAmelCase__ )
    # Nothing to do when enough images were already downloaded.
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        lowercase = client.query(text=lowerCAmelCase__ )
        if len(lowerCAmelCase__ ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            # Not enough hits: grow the request size and retry.
            lowercase = int(factor * num_images )
            lowercase = ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=lowerCAmelCase__ , aesthetic_weight=0.1 , )
    lowercase = 0
    lowercase = 0
    lowercase = tqdm(desc='''downloading real regularization images''' , total=lowerCAmelCase__ )
    # Record caption, source URL and local path for every stored image.
    with open(f'{class_data_dir}/caption.txt' , '''w''' ) as fa, open(f'{class_data_dir}/urls.txt' , '''w''' ) as fa, open(
        f'{class_data_dir}/images.txt' , '''w''' ) as fa:
        while total < num_class_images:
            lowercase = class_images[count]
            count += 1
            try:
                lowercase = requests.get(images['''url'''] )
                if img.status_code == 200:
                    lowercase = Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg' , '''wb''' ) as f:
                        f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fa.write(images['''url'''] + '''\n''' )
                    fa.write(f'{class_data_dir}/images/{total}.jpg' + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            # Best-effort download: skip any image that fails to fetch/decode.
            except Exception:
                continue
    return
def UpperCamelCase():
    """Parse the CLI arguments of the retrieval script.

    Returns an `argparse.Namespace` with `class_prompt`, `class_data_dir` and
    `num_class_images` (default 200). NOTE(review): the obfuscated copy passed an
    undefined module-level name everywhere (`add_help`, `required`, `type`,
    `default`) — restored the literal values.
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    # NOTE(review): as written this entry point raises NameError — `parse_args`
    # and `retrieve` do not exist under these names in this file (both defs above
    # were renamed to `UpperCamelCase`), and `args` is never bound (the parse
    # result is assigned to `lowercase__`). Confirm the intended call targets.
    lowercase__ :Optional[int] = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure: submodule name -> list of public symbols it provides.
# NOTE(review): the obfuscated copy passed `_import_structure` to `_LazyModule`
# without ever defining it (NameError at import time) and assigned the modeling
# symbol list to a throwaway name; the canonical pattern is restored here.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling symbols as well.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # At runtime the module object is replaced by a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCamelCase(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into HF format (weights + config + vocab files).

    NOTE(review): reconstructed from an obfuscated copy — the mangled def reused a
    single parameter name (a SyntaxError) and every local was bound to `lowercase`;
    names were restored from the surviving use-sites.
    """
    # Load the official checkpoint on CPU.
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Keep only JSON-serializable params; drop tensor/array-valued entries.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE continuation pieces keep their "@@" stripped; full words get a "</w>" marker
    # (the first 14 ids are special tokens and are left untouched).
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    # Fixed copy-paste bug: this print used the *config* path for the vocab file.
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    # CLI entry point for the conversion defined above.
    # NOTE(review): the obfuscated copy bound the parser and the parsed args to
    # `lowercase__` while still *using* `parser`/`args`, and called the function
    # by its pre-obfuscation name — all NameErrors as written.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # The conversion function in this file is the (obfuscated) `UpperCamelCase` above.
    UpperCamelCase(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 702 |
import logging
from transformers import PretrainedConfig
lowercase__ :int = logging.getLogger(__name__)
# Map from shorthand checkpoint name to its hosted config URL.
# NOTE(review): this rebinding clobbers the logger assigned on the previous line —
# the two were presumably distinct names before mangling.
lowercase__ :Dict = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( PretrainedConfig ):
    """Configuration for the BertAbs extractive/abstractive summarization model.

    NOTE(review): the obfuscated copy inherited from the undefined name
    `SCREAMING_SNAKE_CASE__` (the only config base imported in this file is
    `PretrainedConfig`) and named every __init__ parameter `A__` — a SyntaxError.
    Parameter names are restored from the attribute-assignment order in the body.
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Encoder hyper-parameters.
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        # Decoder hyper-parameters.
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 633 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase__ :List[Any] = logging.getLogger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """CoNLL-style NER token-classification task: one ``token label…`` pair per line,
    sentences separated by blank lines or ``-DOCSTART-`` markers.

    NOTE(review): in this copy the assignment targets were mangled to ``lowercase``
    while their uses keep the original names (``label_idx``, ``words``, ``examples``…),
    and all methods share the name ``A__`` so later defs shadow earlier ones — verify
    against the original before relying on this code.
    """

    def __init__( self ,A__=-1):
        # in NER datasets, the last column is usually reserved for NER label
        # (presumably `self.label_idx = label_idx` before mangling — confirm)
        lowercase : str = label_idx

    def A__ ( self ,A__ ,A__):
        # Read `<data_dir>/<mode>.txt` into a list of InputExample(guid, words, labels).
        if isinstance(A__ ,A__):
            lowercase : Optional[int] = mode.value
        lowercase : Union[str, Any] = os.path.join(A__ ,f'{mode}.txt')
        lowercase : Any = 1
        lowercase : Optional[Any] = []
        with open(A__ ,encoding='''utf-8''') as f:
            lowercase : List[Any] = []
            lowercase : Optional[int] = []
            for line in f:
                # A blank line or document marker closes the current sentence.
                if line.startswith('''-DOCSTART-''') or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' ,words=A__ ,labels=A__))
                        guid_index += 1
                        lowercase : List[str] = []
                        lowercase : List[Any] = []
                else:
                    lowercase : Dict = line.split(''' ''')
                    words.append(splits[0])
                    if len(A__) > 1:
                        labels.append(splits[self.label_idx].replace('''\n''' ,''''''))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('''O''')
            # Flush the trailing sentence if the file does not end with a blank line.
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}' ,words=A__ ,labels=A__))
        return examples

    def A__ ( self ,A__ ,A__ ,A__):
        # Write predictions next to the original tokens, consuming one prediction per token.
        lowercase : List[str] = 0
        for line in test_input_reader:
            if line.startswith('''-DOCSTART-''') or line == "" or line == "\n":
                writer.write(A__)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                lowercase : Union[str, Any] = line.split()[0] + ''' ''' + preds_list[example_id].pop(0) + '''\n'''
                writer.write(A__)
            else:
                # Truncated example: the model produced fewer predictions than tokens.
                logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' ,line.split()[0])

    def A__ ( self ,A__):
        # Label list: read from `path` when given, else the CoNLL-2003 defaults
        # (an "O" label is prepended if the file omits it).
        if path:
            with open(A__ ,'''r''') as f:
                lowercase : Union[str, Any] = f.read().splitlines()
            if "O" not in labels:
                lowercase : Dict = ['''O'''] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """CoNLL-2003 chunking task: same reader as the NER task, but the label lives
    in the second-to-last column (``label_idx=-2``)."""

    def __init__( self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def A__ ( self ,A__):
        # Label list: read from `path` when given, else the standard chunking tag set.
        # NOTE(review): assignment targets here are mangled to `lowercase` while the
        # uses keep the name `labels` — confirm against the original.
        if path:
            with open(A__ ,'''r''') as f:
                lowercase : List[str] = f.read().splitlines()
            if "O" not in labels:
                lowercase : Tuple = ['''O'''] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Part-of-speech tagging task over CoNLL-U files (read with `conllu.parse_incr`);
    labels come from each token's ``upos`` field.

    NOTE(review): assignment targets are mangled to ``lowercase`` while uses keep
    original names, and all methods are named ``A__`` (later defs shadow earlier
    ones) — verify against the original.
    """

    def A__ ( self ,A__ ,A__):
        # Read `<data_dir>/<mode>.txt` (CoNLL-U) into InputExample objects,
        # one example per sentence.
        if isinstance(A__ ,A__):
            lowercase : Tuple = mode.value
        lowercase : int = os.path.join(A__ ,f'{mode}.txt')
        lowercase : Union[str, Any] = 1
        lowercase : Union[str, Any] = []
        with open(A__ ,encoding='''utf-8''') as f:
            for sentence in parse_incr(A__):
                lowercase : Any = []
                lowercase : int = []
                for token in sentence:
                    words.append(token['''form'''])
                    labels.append(token['''upos'''])
                assert len(A__) == len(A__)
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}' ,words=A__ ,labels=A__))
                    guid_index += 1
        return examples

    def A__ ( self ,A__ ,A__ ,A__):
        # Write "form (gold|pred)" tokens, one sentence per output line.
        lowercase : List[str] = 0
        for sentence in parse_incr(A__):
            lowercase : List[str] = preds_list[example_id]
            lowercase : Union[str, Any] = ''''''
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(A__)
            example_id += 1

    def A__ ( self ,A__):
        # Universal POS tag set, or a custom list read from `path`.
        if path:
            with open(A__ ,'''r''') as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 703 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
# Deprecation shim: this standalone script now lives inside `diffusers`
# (the pipeline class is re-exported above for backward compatibility).
warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
| 633 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
# Guarded re-exports: when torch/transformers are missing, fall back to dummy
# objects so that `from ... import X` still resolves (and fails lazily with a
# helpful message instead of an ImportError here).
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

# The Flax pipeline additionally requires jax/flax to be installed.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 704 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase(shape, vocab_size, rng=None):
    """Return an int32 numpy array of `shape` filled with uniform random ids in [0, vocab_size).

    NOTE(review): reconstructed from an obfuscated copy — the mangled def repeated a
    single parameter name (a SyntaxError) and never initialized `total_dims`; the
    dtype `jnp.intaa` does not exist, so `np.int32` is used (it compares equal to
    `jnp.int32` as a dtype).
    """
    if rng is None:
        rng = random.Random()

    # Total number of elements to draw.
    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=np.int32).reshape(shape)

    return output
def UpperCamelCase(shape, rng=None):
    """Build a random 0/1 attention mask of `shape` whose last column is forced to 1,
    so every batch row attends to at least one token.

    NOTE(review): reconstructed from an obfuscated copy — the mangled def repeated a
    parameter name (a SyntaxError) and the forced-attention write was collapsed to
    `lowercase = 1`; presumably `attn_mask[:, -1] = 1` — confirm. Relies on the
    sibling helper originally named `ids_tensor` (its def was also mangled).
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowercase :
    """Mixin of `generate()` smoke tests shared by Flax generative model testers.

    Subclasses are expected to provide a `model_tester` and the tuple of
    `all_generative_model_classes`. NOTE(review): local assignment targets are
    mangled to ``lowercase`` throughout while the uses keep the original names
    (``config``, ``input_ids``, ``attention_mask``, ``max_length``…) — verify
    against the original before relying on this code.
    """

    lowercase_ : Any =None
    lowercase_ : List[str] =()

    def A__ ( self):
        # Build a small generation setup: batch<=2, half-length sequences, +5 new tokens.
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        lowercase = 2
        lowercase = inputs['''input_ids'''].shape[-1] // 2
        lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        lowercase = jnp.ones_like(A__)
        lowercase = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        lowercase = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            lowercase = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def A__ ( self):
        # Greedy generation must match the PyTorch twin model token-for-token.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 0
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
            lowercase = getattr(A__ ,A__)
            lowercase = pt_model_class(A__).eval()
            lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
            lowercase = flax_model.generate(A__).sequences
            lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())

    def A__ ( self):
        # Greedy generation: eager and jitted results must agree.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Sampling: eager and jitted results must agree (same PRNG seed).
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam search: eager and jitted results must agree.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # num_return_sequences: output batch grows by that factor.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)

    def A__ ( self):
        # Sampling with logits warpers (temperature/top-k/top-p, forced bos/eos ids).
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        lowercase = 0.8
        lowercase = 1_0
        lowercase = 0.3
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Greedy generation with logits processors.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam search with logits processors.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 2
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Greedy generation with an explicit (left-padded) attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Sampling with an explicit (left-padded) attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam search with an explicit (left-padded) attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = 2
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
    """Checks that Flax `generate()` rejects unknown keyword arguments with a
    helpful error (typo detection and arbitrary unused kwargs)."""

    def A__ ( self):
        # NOTE(review): assignment targets are mangled to `lowercase`; the expected
        # exception class passed to assertRaisesRegex was also mangled to `A__`.
        lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        lowercase = '''Hello world'''
        lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(A__ ,'''do_samples'''):
            model.generate(A__ ,do_samples=A__)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(A__ ,'''foo'''):
            lowercase = {'''foo''': '''bar'''}
            model.generate(A__ ,**A__)
| 633 | 0 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowercase :
    """Empty placeholder; exists only to exercise the `require_onnxruntime` decorator."""

    pass
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Scheduler tests for `UniPCMultistepScheduler`: save/load round-trips, config
    sweeps, and full denoising loops checked against reference means.

    NOTE(review): local assignment targets are mangled to ``lowercase`` while uses
    keep the original names (``config``, ``scheduler``, ``sample``…), and all
    methods are named ``A__`` — verify against the original before relying on it.
    """

    lowercase_ : List[str] =(UniPCMultistepScheduler,)
    lowercase_ : Tuple =(('''num_inference_steps''', 25),)

    def A__ ( self ,**A__):
        # Default scheduler config; keyword overrides are merged in.
        lowercase = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }
        config.update(**A__)
        return config

    def A__ ( self ,A__=0 ,**A__):
        # Save config to a temp dir, reload, and check stepping stays identical
        # (past residuals copied over before stepping).
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residuals
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowercase , lowercase = sample, sample
            for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
                lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def A__ ( self ,A__=0 ,**A__):
        # Variant of the round-trip check: residuals copied *after* reloading.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residual (must be after setting timesteps)
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def A__ ( self ,A__=None ,**A__):
        # Run a full 10-step denoising loop with the dummy model; returns the final sample.
        if scheduler is None:
            lowercase = self.scheduler_classes[0]
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(**A__)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        return sample

    def A__ ( self):
        # Stepping at two consecutive timesteps preserves the sample shape.
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            lowercase = self.dummy_sample
            lowercase = 0.1 * sample
            if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
                scheduler.set_timesteps(A__)
            elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
                lowercase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            lowercase = scheduler.timesteps[5]
            lowercase = scheduler.timesteps[6]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            self.assertEqual(output_a.shape ,sample.shape)
            self.assertEqual(output_a.shape ,output_a.shape)

    def A__ ( self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3
        lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        lowercase = DEISMultistepScheduler.from_config(scheduler.config)
        lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
        lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3

    def A__ ( self):
        # Sweep over training-timestep counts.
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=A__)

    def A__ ( self):
        # Sweep thresholding x solver order x type x prediction type.
        self.check_over_configs(thresholding=A__)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)

    def A__ ( self):
        # Sweep prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=A__)

    def A__ ( self):
        # Sweep solver type x order x prediction type; full loop must stay finite.
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
                    lowercase = self.full_loop(
                        solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
                    assert not torch.isnan(A__).any(), "Samples have nan numbers"

    def A__ ( self):
        # lower_order_final toggle.
        self.check_over_configs(lower_order_final=A__)
        self.check_over_configs(lower_order_final=A__)

    def A__ ( self):
        # Sweep inference-step counts at timestep 0.
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=A__ ,time_step=0)

    def A__ ( self):
        # Full loop against the epsilon-prediction reference mean.
        lowercase = self.full_loop()
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.2464) < 1E-3

    def A__ ( self):
        # Full loop against the v-prediction reference mean.
        lowercase = self.full_loop(prediction_type='''v_prediction''')
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.1014) < 1E-3

    def A__ ( self):
        # Half-precision loop keeps the sample dtype.
        # NOTE(review): `torch.floataa` is a mangled dtype name (digits replaced);
        # given `.half()` above, presumably float16 originally — confirm.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        assert sample.dtype == torch.floataa

    def A__ ( self ,**A__):
        # Using every training timestep for inference yields unique timesteps.
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
lowercase__ :Optional[int] = True
from torch.cuda.amp import autocast
lowercase__ :Any = logging.getLogger(__name__)
def UpperCamelCase(default=None, metadata=None):
    """`dataclasses.field` helper for list-valued defaults.

    Mutable defaults must be wrapped in a factory; `metadata` is passed through.
    NOTE(review): the obfuscated def repeated one parameter name (a SyntaxError);
    distinct names restored from the body's use-sites.
    """
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class lowercase :
    """Arguments controlling which wav2vec2 model to fine-tune and its dropout/
    masking hyper-parameters.

    NOTE(review): every field is named ``lowercase_`` in this copy, so later
    declarations shadow earlier ones — the original presumably gave each field a
    distinct name (model_name_or_path, cache_dir, freeze_feature_extractor, …).
    """

    lowercase_ : str =field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    lowercase_ : Optional[str] =field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    lowercase_ : Optional[bool] =field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    lowercase_ : Optional[float] =field(
        default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
    lowercase_ : Optional[float] =field(
        default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
    lowercase_ : Optional[float] =field(
        default=0.1 , metadata={
            '''help''': '''The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.'''
        } , )
    lowercase_ : Optional[float] =field(
        default=0.1 , metadata={'''help''': '''The dropout probabilitiy for all 1D convolutional layers in feature extractor.'''} , )
    lowercase_ : Optional[float] =field(
        default=0.05 , metadata={
            '''help''': (
                '''Propability of each feature vector along the time axis to be chosen as the start of the vector'''
                '''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
                '''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
            )
        } , )
    lowercase_ : Optional[float] =field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class lowercase :
    """Arguments describing the dataset/split to train on and preprocessing options.

    NOTE(review): fields are all named ``lowercase_`` in this copy (shadowing);
    originals were presumably dataset_config_name, train_split_name,
    overwrite_cache, preprocessing_num_workers, max_train_samples,
    max_val_samples and chars_to_ignore.
    """

    lowercase_ : Optional[str] =field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    lowercase_ : Optional[str] =field(
        default='''train+validation''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    lowercase_ : bool =field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    lowercase_ : Optional[int] =field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    lowercase_ : Optional[int] =field(
        default=SCREAMING_SNAKE_CASE__ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    lowercase_ : Optional[int] =field(
        default=SCREAMING_SNAKE_CASE__ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of validation examples to this '''
                '''value if set.'''
            )
        } , )
    lowercase_ : List[str] =list_field(
        default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class lowercase :
    """Data collator that dynamically pads wav2vec2 `input_values` and CTC labels.

    Inputs and labels are padded separately (they have different lengths), and
    label pad positions are replaced with -100 so the CTC loss ignores them.
    NOTE(review): the field names were all mangled to ``lowercase_`` (shadowing);
    the first was presumably ``processor`` and the rest padding/max-length options.
    """

    lowercase_ : WavaVecaProcessor
    lowercase_ : Union[bool, str] =True
    lowercase_ : Optional[int] =None
    lowercase_ : Optional[int] =None
    lowercase_ : Optional[int] =None
    lowercase_ : Optional[int] =None

    def __call__( self ,A__):
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        lowercase = [{'''input_values''': feature['''input_values''']} for feature in features]
        lowercase = [{'''input_ids''': feature['''labels''']} for feature in features]
        lowercase = self.processor.pad(
            A__ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='''pt''' ,)
        lowercase = self.processor.pad(
            labels=A__ ,padding=self.padding ,max_length=self.max_length_labels ,pad_to_multiple_of=self.pad_to_multiple_of_labels ,return_tensors='''pt''' ,)
        # replace padding with -100 to ignore loss correctly
        lowercase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1) ,-1_0_0)
        lowercase = labels
        return batch
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Trainer subclass with a custom training step supporting AMP, Apex and
    DeepSpeed backward passes for CTC fine-tuning.

    NOTE(review): local assignment targets are mangled to ``lowercase`` while
    uses keep the original names (``inputs``, ``loss``) — verify against the
    original before relying on it.
    """

    def A__ ( self ,A__ ,A__):
        # One training step: forward, reduce/scale the loss, backward;
        # returns the detached loss tensor.
        model.train()
        lowercase = self._prepare_inputs(A__)
        if self.use_amp:
            with autocast():
                lowercase = self.compute_loss(A__ ,A__)
        else:
            lowercase = self.compute_loss(A__ ,A__)
        if self.args.n_gpu > 1:
            # DataParallel returns a per-device loss vector; reduce according to
            # the model's configured CTC loss reduction.
            if model.module.config.ctc_loss_reduction == "mean":
                lowercase = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                lowercase = loss.sum() / (inputs['''labels'''] >= 0).sum()
            else:
                raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']')
        if self.args.gradient_accumulation_steps > 1:
            lowercase = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(A__).backward()
        elif self.use_apex:
            with amp.scale_loss(A__ ,self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(A__)
        else:
            loss.backward()
        return loss.detach()
def UpperCamelCase ( ):
    """Script entry point: fine-tune Wav2Vec2 with CTC on Common Voice, then
    evaluate with WER.

    Parses HF argument dataclasses, resumes from the last checkpoint when
    present, builds tokenizer/feature-extractor/processor/model, cleans and
    tokenizes transcripts, resamples audio 48 kHz -> 16 kHz, trains with
    ``CTCTrainer`` and finally evaluates.

    NOTE(review): throughout this function every assignment target has been
    mangled to the bare name ``lowercase`` while subsequent statements read the
    originally intended names (``parser``, ``training_args``, ``data_args``,
    ``last_checkpoint``, ``train_dataset``, ``processor``, ``trainer`` ...),
    and ``lowerCAmelCase__`` is referenced although this function takes no
    parameters. As written the first such read raises NameError; the binding
    targets must be restored before this script can run.
    """
    lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    lowercase = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowercase = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase__ )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets:
    lowercase = datasets.load_dataset(
        '''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
    lowercase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
    # Create and save tokenizer
    lowercase = f'[{"".join(data_args.chars_to_ignore )}]'
    def remove_special_characters(lowerCAmelCase__ ):
        # Strip ignored characters, lowercase, and append a word-delimiter space.
        lowercase = re.sub(lowerCAmelCase__ , '''''' , batch['''sentence'''] ).lower() + ''' '''
        return batch
    lowercase = train_dataset.map(lowerCAmelCase__ , remove_columns=['''sentence'''] )
    lowercase = eval_dataset.map(lowerCAmelCase__ , remove_columns=['''sentence'''] )
    def extract_all_chars(lowerCAmelCase__ ):
        # Collect the set of characters present in the batch (vocab building).
        lowercase = ''' '''.join(batch['''text'''] )
        lowercase = list(set(lowerCAmelCase__ ) )
        return {"vocab": [vocab], "all_text": [all_text]}
    lowercase = train_dataset.map(
        lowerCAmelCase__ , batched=lowerCAmelCase__ , batch_size=-1 , keep_in_memory=lowerCAmelCase__ , remove_columns=train_dataset.column_names , )
    lowercase = train_dataset.map(
        lowerCAmelCase__ , batched=lowerCAmelCase__ , batch_size=-1 , keep_in_memory=lowerCAmelCase__ , remove_columns=eval_dataset.column_names , )
    lowercase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
    lowercase = {v: k for k, v in enumerate(lowerCAmelCase__ )}
    # replace the literal space with the CTC word-delimiter token
    lowercase = vocab_dict[''' ''']
    del vocab_dict[" "]
    lowercase = len(lowerCAmelCase__ )
    lowercase = len(lowerCAmelCase__ )
    with open('''vocab.json''' , '''w''' ) as vocab_file:
        json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowercase = WavaVecaCTCTokenizer(
        '''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
    lowercase = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ )
    lowercase = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
    lowercase = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
    if data_args.max_train_samples is not None:
        lowercase = min(len(lowerCAmelCase__ ) , data_args.max_train_samples )
        lowercase = train_dataset.select(range(lowerCAmelCase__ ) )
    if data_args.max_val_samples is not None:
        lowercase = eval_dataset.select(range(data_args.max_val_samples ) )
    lowercase = torchaudio.transforms.Resample(4_8000 , 1_6000 )
    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(lowerCAmelCase__ ):
        # Decode one audio file and resample 48 kHz -> 16 kHz.
        lowercase , lowercase = torchaudio.load(batch['''path'''] )
        lowercase = resampler(lowerCAmelCase__ ).squeeze().numpy()
        lowercase = 1_6000
        lowercase = batch['''text''']
        return batch
    lowercase = train_dataset.map(
        lowerCAmelCase__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    lowercase = eval_dataset.map(
        lowerCAmelCase__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    def prepare_dataset(lowerCAmelCase__ ):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['''sampling_rate'''] ) ) == 1
        ), f'Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'
        lowercase = processor(
            audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
        batch.update(lowerCAmelCase__ )
        return batch
    lowercase = train_dataset.map(
        lowerCAmelCase__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , )
    lowercase = eval_dataset.map(
        lowerCAmelCase__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , )
    # Metric
    lowercase = datasets.load_metric('''wer''' )
    def compute_metrics(lowerCAmelCase__ ):
        # Greedy-decode predictions and score them against labels with WER.
        lowercase = pred.predictions
        lowercase = np.argmax(lowerCAmelCase__ , axis=-1 )
        lowercase = processor.tokenizer.pad_token_id
        lowercase = processor.batch_decode(lowerCAmelCase__ )
        # we do not want to group tokens when computing the metrics
        lowercase = processor.batch_decode(pred.label_ids , group_tokens=lowerCAmelCase__ )
        lowercase = wer_metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    lowercase = DataCollatorCTCWithPadding(processor=lowerCAmelCase__ , padding=lowerCAmelCase__ )
    # Initialize our Trainer
    lowercase = CTCTrainer(
        model=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , args=lowerCAmelCase__ , compute_metrics=lowerCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            lowercase = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            lowercase = model_args.model_name_or_path
        else:
            lowercase = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )
        lowercase = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
        trainer.save_model()
        lowercase = train_result.metrics
        lowercase = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
        )
        lowercase = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
        trainer.log_metrics('''train''' , lowerCAmelCase__ )
        trainer.save_metrics('''train''' , lowerCAmelCase__ )
        trainer.save_state()
    # Evaluation
    lowercase = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        lowercase = trainer.evaluate()
        lowercase = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCAmelCase__ )
        lowercase = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
        trainer.log_metrics('''eval''' , lowerCAmelCase__ )
        trainer.save_metrics('''eval''' , lowerCAmelCase__ )
    return results

if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file — the entry point above
    # is named `UpperCamelCase` — so running this script raises NameError.
    main()
| 706 |
import argparse
from collections import defaultdict
import yaml
# Path to the English docs table of contents that the functions below validate/sort.
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
    """Deduplicate a model-doc ToC section and sort it by title.

    Fixes applied: locals were all bound to the bare name ``lowercase`` while
    later statements read ``counts``/``duplicates``/``new_doc``/``titles``
    (NameError); the counter was built as ``defaultdict(model_doc)`` (a list is
    not a valid default factory); and the sort key was ``lambda
    lowerCAmelCase__: s["title"]`` which reads an unbound name ``s``.

    Args:
        lowerCAmelCase__: list of ``{"local": ..., "title": ...}`` ToC entries.

    Returns:
        The cleaned list, each ``local`` appearing once, sorted
        case-insensitively by title.

    Raises:
        ValueError: if one ``local`` key appears with several different titles.
    """
    model_doc = lowerCAmelCase__
    # Count how many times each target page appears in the section.
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def UpperCamelCase ( lowerCAmelCase__=False ):
    """Validate (and with ``overwrite=True``, rewrite) the sorting of the model
    part of the docs table of contents.

    NOTE(review): several mangling defects make this unrunnable as written:
    * ``open(lowerCAmelCase__ ...)`` opens the boolean ``overwrite`` flag as a
      file path — presumably the module-level ToC path constant was intended;
    * every assignment target is the bare name ``lowercase`` while later code
      reads ``content``, ``api_idx``, ``api_doc``, ``model_idx``,
      ``model_doc``, ``modalities_docs``, ``diff`` ... (NameError);
    * ``clean_model_doc_toc`` does not exist under that name — both functions
      in this file are named ``UpperCamelCase`` and the second shadows the first;
    * ``yaml.dump(lowerCAmelCase__, allow_unicode=lowerCAmelCase__)`` would dump
      the flag rather than the updated ToC content.
    """
    with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
        lowercase = yaml.safe_load(f.read() )
    # Get to the API doc
    lowercase = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    lowercase = content[api_idx]['''sections''']
    # Then to the model doc
    lowercase = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    lowercase = api_doc[model_idx]['''sections''']
    # one entry per modality sub-section that itself contains "sections"
    lowercase = [(idx, section) for idx, section in enumerate(lowerCAmelCase__ ) if '''sections''' in section]
    lowercase = False
    for idx, modality_doc in modalities_docs:
        lowercase = modality_doc['''sections''']
        lowercase = clean_model_doc_toc(lowerCAmelCase__ )
        if old_modality_doc != new_modality_doc:
            lowercase = True
            if overwrite:
                lowercase = new_modality_doc
    if diff:
        if overwrite:
            lowercase = model_doc
            lowercase = api_doc
            with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    # Fixes applied: the parser/args were previously assigned to the unrelated
    # global ``lowercase__`` while the next lines read ``parser``/``args``
    # (NameError), and the callee ``check_model_doc`` does not exist under that
    # name — `UpperCamelCase` resolves to the last definition above, which is
    # the ToC checker.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    UpperCamelCase(args.fix_and_overwrite)
| 633 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# Substring mapping from T5X/Flax SwitchTransformers parameter-name fragments
# to their HF PyTorch equivalents; applied fragment-by-fragment in the
# key-renaming function below.
# NOTE(review): the rename function reads this as ``MOE_LAYER_NAME_MAPPING``,
# but here it is bound to ``lowercase__`` — the original constant name must be
# restored for the lookup to work.
lowercase__ :Optional[Any] = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def UpperCamelCase ( lowerCAmelCase__ ):
    """Rename T5X/Flax SwitchTransformers checkpoint keys into the HF layout.

    NOTE(review): assignment targets are mangled to ``lowercase`` while later
    statements read ``s_dict``, ``keys``, ``new_key``, ``groups``,
    ``num_experts`` and the (typo'd) ``expert_weihts`` — NameError as written;
    ``MOE_LAYER_NAME_MAPPING`` is also never bound under that name in this
    file. The final ``print`` emits the literal text "nested fstring" where the
    original code interpolated the per-expert key name.
    """
    lowercase = list(s_dict.keys() )
    for key in keys:
        # layers_<n>  ->  block/<n>/layer
        lowercase = R'''.*/layers_(\d+)'''
        lowercase = key
        if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
            lowercase = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , lowerCAmelCase__ )
        lowercase = R'''(encoder|decoder)\/'''
        if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
            lowercase = re.match(lowerCAmelCase__ , lowerCAmelCase__ ).groups()
            if groups[0] == "encoder":
                # encoder MLP sits in sub-layer 1
                lowercase = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , lowerCAmelCase__ )
                lowercase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , lowerCAmelCase__ )
            elif groups[0] == "decoder":
                # decoder MLP sits in sub-layer 2 (after cross-attention)
                lowercase = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , lowerCAmelCase__ )
                lowercase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , lowerCAmelCase__ )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                lowercase = new_key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
        print(f'{key} -> {new_key}' )
        lowercase = s_dict.pop(lowerCAmelCase__ )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        # relative-attention bias matrices are stored transposed in T5X
        lowercase = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        lowercase = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            # split the stacked expert tensor into one entry per expert
            lowercase = s_dict[key].shape[0]
            lowercase = s_dict[key]
            for idx in range(lowerCAmelCase__ ):
                lowercase = expert_weihts[idx]
                print(f'{key} -> {key.replace("expert/" , "nested fstring" )}' )
            s_dict.pop(lowerCAmelCase__ )
    return s_dict
# Mapping from gin hyper-parameter names to SwitchTransformersConfig attribute
# names, used when reconstructing a config from a T5X gin file.
# NOTE(review): the gin parser below reads this as ``GIN_TO_CONFIG_MAPPING``,
# but it is bound to ``lowercase__`` here — the constant name must be restored.
lowercase__ :Dict = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    """Build a ``SwitchTransformersConfig`` from a T5X gin config file.

    NOTE(review): both parameters share the name ``lowerCAmelCase__`` —
    duplicate argument names are a SyntaxError in Python — and the assignment
    targets are mangled (later reads of ``regex_match``, ``value``,
    ``activation``, ``num_experts``, ``args``, ``config`` fail);
    ``GIN_TO_CONFIG_MAPPING`` is also never bound under that name here.
    """
    import regex as re
    with open(lowerCAmelCase__ , '''r''' ) as f:
        lowercase = f.read()
    # "<PARAM> = <number>" assignments from the gin file
    lowercase = re.findall(R'''(.*) = ([0-9.]*)''' , lowerCAmelCase__ )
    lowercase = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # floats keep a '.', everything else is an int
            lowercase = float(lowerCAmelCase__ ) if '''.''' in value else int(lowerCAmelCase__ )
    # activation tuple, e.g. activations = ('gelu',)
    lowercase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , lowerCAmelCase__ )[0]
    lowercase = str(activation[1] )
    lowercase = num_experts
    lowercase = SwitchTransformersConfig(**lowerCAmelCase__ )
    return config
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__="./" , lowerCAmelCase__=8 ):
    """Convert a T5X/Flax SwitchTransformers checkpoint into a saved PyTorch model.

    NOTE(review): the parameter name ``lowerCAmelCase__`` is declared five
    times (duplicate argument names are a SyntaxError), and the body reads
    names that are only ever bound as ``lowercase`` (``flax_checkpoint_path``,
    ``gin_file``, ``flax_params``, ``pt_model``, ``pytorch_dump_path``) — the
    original parameter/local names must be restored before this can run.
    """
    print(f'Loading flax weights from : {flax_checkpoint_path}' )
    lowercase = checkpoints.load_tax_checkpoint(lowerCAmelCase__ )
    if gin_file is not None:
        # derive the config from the gin file when one is supplied
        lowercase = convert_gin_to_config(lowerCAmelCase__ , lowerCAmelCase__ )
    else:
        lowercase = SwitchTransformersConfig.from_pretrained(lowerCAmelCase__ )
    lowercase = SwitchTransformersForConditionalGeneration(lowerCAmelCase__ )
    lowercase = flax_params['''target''']
    lowercase = flatten_dict(lowerCAmelCase__ , sep='''/''' )
    lowercase = rename_keys(lowerCAmelCase__ )
    lowercase = unflatten_dict(lowerCAmelCase__ , sep='''/''' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(lowerCAmelCase__ , lowerCAmelCase__ )
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
    # Fixes applied: parser/args were assigned to the unrelated global
    # ``lowercase__`` while the following lines read ``parser``/``args``
    # (NameError); the call read the misspelled attribute
    # ``args.switch_tax_checkpoint_path`` (argparse stores it as
    # ``switch_t5x_checkpoint_path``); and the callee
    # ``convert_flax_checkpoint_to_pytorch`` does not exist under that name —
    # `UpperCamelCase` resolves to the last definition above, the converter.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    UpperCamelCase(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 707 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 10**-10 ):
    """Newton–Raphson root finder for a function given as a sympy-compatible
    expression string, starting from an initial guess, to the given precision.

    NOTE(review): the parameter name ``lowerCAmelCase__`` is declared three
    times — duplicate argument names are a SyntaxError in Python — and the
    body reads ``a`` and ``precision`` which are never bound under those names.
    SECURITY NOTE: the expression string is executed with ``eval``; never pass
    untrusted input to this function.
    """
    lowercase = a
    while True:
        # x_{n+1} = x_n - f(x_n) / f'(x_n), evaluated in Decimal for precision
        lowercase = Decimal(lowerCAmelCase__ ) - (
            Decimal(eval(lowerCAmelCase__ ) ) / Decimal(eval(str(diff(lowerCAmelCase__ ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(lowerCAmelCase__ ) ) < precision: # noqa: S307
            return float(lowerCAmelCase__ )
# Let's Execute
if __name__ == "__main__":
    # NOTE(review): `newton_raphson` is not defined in this file — the function
    # above is named `UpperCamelCase` — so every demo line below raises NameError.
    # Find root of trigonometric function
    # Find value of pi
    print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find Square Root of 5
    print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
    # Exponential Roots
    print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 633 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module globals below share the name ``lowercase__`` — the
# pretrained-config map overwrites the logger immediately after it is created.
lowercase__ :Any = logging.get_logger(__name__)
# checkpoint name -> hosted config URL (consumed by from_pretrained machinery)
lowercase__ :str = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """ViT (Vision Transformer) model configuration.

    Holds the transformer hyper-parameters (hidden size, depth, heads, MLP
    width, dropouts, init range) and the image geometry (image size, patch
    size, channels); extra keyword arguments are forwarded to the base config.

    Fixes applied: the ``__init__`` declared every parameter under the single
    name ``A__`` (duplicate argument names are a SyntaxError) — the canonical
    ViTConfig parameter names and defaults are restored; the body bound each
    value to a local named ``lowercase`` so nothing was ever stored on
    ``self`` — the instance-attribute assignments are restored; the class
    attribute was annotated with the undefined name ``List[Any]``, which raises
    NameError at class creation — annotated as ``str`` instead.
    """

    # model identifier consumed by the auto classes (``model_type`` upstream)
    lowercase_ : str ='''vit'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        """Store the ViT hyper-parameters; unknown kwargs go to the base config."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """ONNX-export configuration for ViT: declares the dynamic input axes and
    the validation tolerance for exported models.

    NOTE(review): both properties below are named ``A__`` — the second
    definition shadows the first, making the input-axes mapping unreachable;
    the class-level annotation ``Optional[int]`` is also an undefined name here
    (only ``Mapping`` is imported from typing), which raises NameError at
    class creation. The base class is the undefined placeholder
    ``SCREAMING_SNAKE_CASE__``.
    """

    # minimum supported version for the export tooling (``torch_onnx_minimum_version`` upstream — TODO confirm)
    lowercase_ : Optional[int] =version.parse('''1.11''' )

    @property
    def A__ ( self):
        # Dynamic-axes spec for the exporter: batch / channels / height / width.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])

    @property
    def A__ ( self):
        # Absolute tolerance used when validating exported model outputs.
        return 1E-4
| 708 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase ( lowerCAmelCase__ ):
    """Convert an RGB image to grayscale using the Rec. 601 luma weights.

    Fix applied: the body read ``rgb`` and unpacked the channels into the bare
    name ``lowercase`` while the return read ``r``/``g``/``b`` — NameError as
    written; the intended bindings are restored.

    Args:
        lowerCAmelCase__: numpy array of shape (H, W, 3), channels in RGB order.

    Returns:
        numpy array of shape (H, W) with the weighted luma values.
    """
    rgb = lowerCAmelCase__
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def UpperCamelCase ( lowerCAmelCase__ ):
    """Threshold a grayscale image to a boolean mask: True where 127 < v <= 255.

    Fix applied: the body read the name ``gray`` while the parameter is
    ``lowerCAmelCase__`` — NameError as written.

    Args:
        lowerCAmelCase__: numpy array of grayscale intensities.

    Returns:
        Boolean numpy array of the same shape.
    """
    gray = lowerCAmelCase__
    return (gray > 127) & (gray <= 255)
def UpperCamelCase ( image , kernel ):
    """Morphological dilation of a binary image with the given structuring element.

    Fixes applied: both parameters were declared under the single name
    ``lowerCAmelCase__`` (duplicate argument names are a SyntaxError) and the
    body's binding targets were mangled to ``lowercase`` — the padded-image
    copy and the per-pixel output write had lost their left-hand sides. The
    reconstruction follows the standard padded sliding-window dilation: a
    pixel is set when the kernel overlaps any set pixel in its neighborhood.

    Args:
        image: 2-D numpy array of 0/1 (or boolean) values.
        kernel: 2-D numpy structuring element of 0/1 values.

    Returns:
        2-D integer numpy array of the same shape as ``image``.
    """
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (centered for an odd-sized kernel;
    # offset choice mirrors the upstream reference implementation)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
    # NOTE(review): every assignment below binds to the reused global name
    # ``lowercase__`` while the following lines read the intended names
    # (``lena_path``, ``lena``, ``structuring_element``, ``output``,
    # ``pil_img``), and ``dilation``/``gray_to_binary``/``rgb_to_gray`` are not
    # defined under those names in this file — this demo raises NameError.
    # read original image
    lowercase__ :str = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lowercase__ :List[str] = np.array(Image.open(lena_path))
    # kernel to be applied
    lowercase__ :Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    lowercase__ :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    lowercase__ :str = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 633 | 0 |
def UpperCamelCase ( input_a , input_b ):
    """Logical AND gate for two 0/1 inputs.

    Fix applied: both parameters were declared under the single name
    ``lowerCAmelCase__`` (duplicate argument names are a SyntaxError), so both
    distinct inputs are restored.

    Returns:
        1 when neither input is 0, otherwise 0.
    """
    return int((input_a, input_b).count(0 ) == 0 )
def UpperCamelCase ( ):
    """Exhaustively check the AND-gate truth table.

    NOTE(review): this re-definition shadows the gate function above (both are
    named ``UpperCamelCase``), and the name ``and_gate`` it calls is never
    defined in this file — every assert below raises NameError as written.
    """
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # NOTE(review): neither `test_and_gate` nor `and_gate` exists under those
    # names in this file (both definitions above are named `UpperCamelCase`),
    # so this demo raises NameError as written.
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 709 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase ( lowerCAmelCase__ ):
    """Remove fairseq-specific entries from a checkpoint state dict, in place.

    Keys that are absent are skipped silently (``pop`` with a ``None``
    default). Fix applied: the loop previously called
    ``state_dict.pop(state_dict, state_dict)`` — using the (unhashable) dict
    itself as the key, which raises TypeError — instead of popping each ignore
    key.

    Args:
        lowerCAmelCase__: the ``state_dict`` mapping to clean (mutated in place).
    """
    state_dict = lowerCAmelCase__
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def UpperCamelCase ( lowerCAmelCase__ ):
    """Create a bias-free ``nn.Linear`` that shares the embedding's weights.

    Used to tie an LM head to the token embedding: the linear layer's
    ``weight.data`` is replaced by the embedding matrix itself, mirroring the
    upstream fairseq→HF conversion helpers. Fix applied: the original body
    unpacked the shape into the bare name ``lowercase`` and passed the
    embedding module itself as every ``nn.Linear`` argument — the intended
    ``vocab_size``/``emb_size``/``bias=False`` call is restored.

    Args:
        lowerCAmelCase__: an ``nn.Embedding`` (any module with a 2-D ``weight``).

    Returns:
        ``nn.Linear`` with no bias whose ``weight.data`` is the embedding matrix.
    """
    emb = lowerCAmelCase__
    vocab_size , emb_size = emb.weight.shape
    # The in/out feature sizes are placeholders: .weight.data is overwritten
    # on the next line with the embedding matrix.
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def UpperCamelCase ( lowerCAmelCase__ ):
    """Load a fairseq M2M-100 checkpoint from disk and rebuild it as an HF
    ``MaMaaaForConditionalGeneration`` model with a tied LM head.

    NOTE(review): every assignment target below is mangled to ``lowercase``;
    the subsequent reads of ``mam_aaa``, ``args``, ``state_dict``,
    ``vocab_size`` and ``model`` raise NameError, and the helpers
    ``remove_ignore_keys_``/``make_linear_from_emb`` are not defined under
    those names in this file (both are ``UpperCamelCase``).
    SECURITY NOTE: ``torch.load`` unpickles arbitrary objects — only run this
    on trusted checkpoint files.
    """
    lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
    # fairseq stores hyper-parameters under "args" (older) or "cfg" (newer)
    lowercase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    lowercase = mam_aaa['''model''']
    remove_ignore_keys_(lowerCAmelCase__ )
    lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    lowercase = MaMaaaConfig(
        vocab_size=lowerCAmelCase__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
    lowercase = state_dict['''decoder.embed_tokens.weight''']
    lowercase = MaMaaaForConditionalGeneration(lowerCAmelCase__ )
    model.model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
    # tie the LM head to the shared embedding matrix
    lowercase = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    # Fixes applied: parser/args/model were assigned to unrelated ``lowercase__``
    # globals while the following lines read ``parser``/``args``/``model``
    # (NameError); the converter was called via the undefined name
    # ``convert_fairseq_mamaaa_checkpoint_from_disk`` (it is the
    # `UpperCamelCase` defined directly above); and the attribute access was
    # garbled to ``args.fairseq_pathß`` (stray 'ß' character).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = UpperCamelCase(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 0 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Common-model and pipeline test suite for TF MobileBERT.

    NOTE(review): every class attribute below is declared under the single
    name ``lowercase_`` — each re-assignment overwrites the previous one, so
    the mixins cannot find the attributes they expect (``all_model_classes``,
    ``pipeline_model_mapping``, ...) — and the two mixin base classes are the
    undefined placeholder ``SCREAMING_SNAKE_CASE__``.
    """

    # tuple of all TF MobileBERT architectures exercised by the common tests
    lowercase_ : Dict =(
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # pipeline-task -> model-class mapping for the pipeline tests
    lowercase_ : Dict =(
        {
            '''feature-extraction''': TFMobileBertModel,
            '''fill-mask''': TFMobileBertForMaskedLM,
            '''question-answering''': TFMobileBertForQuestionAnswering,
            '''text-classification''': TFMobileBertForSequenceClassification,
            '''token-classification''': TFMobileBertForTokenClassification,
            '''zero-shot''': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowercase_ : Tuple =False
    lowercase_ : Any =False

    def A__ ( self ,A__ ,A__ ,A__=False):
        """Prepare inputs via the mixin; add dummy labels when requested.

        NOTE(review): the three positional parameters all share the name
        ``A__`` (a SyntaxError), and the body reads ``return_labels``/
        ``model_class``/``inputs_dict`` which are never bound under those names.
        """
        lowercase = super()._prepare_for_class(A__ ,A__ ,return_labels=A__)
        if return_labels:
            if model_class in get_values(A__):
                # pretraining-style heads expect an extra zero label tensor
                lowercase = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa)
        return inputs_dict
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__ ,A__=1_3 ,A__=7 ,A__=True ,A__=True ,A__=True ,A__=True ,A__=9_9 ,A__=3_2 ,A__=3_2 ,A__=2 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=1_6 ,A__=2 ,A__=0.02 ,A__=3 ,A__=4 ,A__=None ,):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
lowercase = embedding_size
def A__ ( self):
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length])
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
lowercase = ids_tensor([self.batch_size] ,self.num_choices)
lowercase = MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,embedding_size=self.embedding_size ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = TFMobileBertModel(config=A__)
lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase = model(A__)
lowercase = [input_ids, input_mask]
lowercase = model(A__)
lowercase = model(A__)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = TFMobileBertForMaskedLM(config=A__)
lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase = model(A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = TFMobileBertForNextSentencePrediction(config=A__)
lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase = model(A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = TFMobileBertForPreTraining(config=A__)
lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase = model(A__)
self.parent.assertEqual(
result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_labels
lowercase = TFMobileBertForSequenceClassification(config=A__)
lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase = model(A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_choices
lowercase = TFMobileBertForMultipleChoice(config=A__)
lowercase = tf.tile(tf.expand_dims(A__ ,1) ,(1, self.num_choices, 1))
lowercase = tf.tile(tf.expand_dims(A__ ,1) ,(1, self.num_choices, 1))
lowercase = tf.tile(tf.expand_dims(A__ ,1) ,(1, self.num_choices, 1))
lowercase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase = model(A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_labels
lowercase = TFMobileBertForTokenClassification(config=A__)
lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase = model(A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = TFMobileBertForQuestionAnswering(config=A__)
lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase = model(A__)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length))
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) = config_and_inputs
lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def A__ ( self):
lowercase = TFMobileBertModelTest.TFMobileBertModelTester(self)
lowercase = ConfigTester(self ,config_class=A__ ,hidden_size=3_7)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*A__)
def test_for_masked_lm(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
def test_for_next_sequence_prediction(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
def test_for_pretraining(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
def test_for_question_answering(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
    """Load the released checkpoint; fixes the undefined `A__` loop variable."""
    # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
    for model_name in ["google/mobilebert-uncased"]:
        model = TFMobileBertModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
@require_tf
class lowercase(unittest.TestCase):
    """Integration test: checks pretraining logits of google/mobilebert-uncased."""

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of the first 3x3 logits for this checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 710 |
from __future__ import annotations
from random import random
class Node:
    """Treap node: a value, a random heap priority, and two child links.

    Renamed from the degraded `lowercase` — `insert` below constructs it as `Node`.
    """

    def __init__(self, value=None):
        self.value = value
        self.prior = random()  # heap priority, uniform in [0, 1)
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root, value):
    """Split a treap into (left, right): left holds values < value, right the rest.

    Restores the assignment targets (`left, root.left = ...` / `root.right, right = ...`)
    that the degraded source collapsed to `lowercase, lowercase = ...`.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left, right):
    """Merge two treaps where every value in `left` precedes every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        # left root wins heap order: hang the merge on its right spine
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root, value):
    """Insert `value` into the treap rooted at `root`; return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root, value):
    """Remove every node holding `value`; return the new root."""
    left, right = split(root, value - 1)  # left: values < value
    _, right = split(right, value)  # drop the middle part equal to value
    return merge(left, right)
def inorder(root):
    """Print the treap's values in sorted order, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root, args):
    """Apply a whitespace-separated command string: '+x' inserts x, '-x' erases x.

    Restores `root = insert(...)` / `root = erase(...)`; the degraded source
    discarded the new root into `lowercase`, making the loop a no-op.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main():
    """Interactive loop: read command lines, mutate the treap, print it each time."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
    # Run the module's doctests, then start the interactive treap loop.
    import doctest
    doctest.testmod()
    main()
| 633 | 0 |
import math
def insertion_sort(array, start=0, end=0):
    """Sort array[start:end] in place with insertion sort; return the array.

    end == 0 means "to the end of the array". Restores the parameter/local
    names the degraded source collapsed (caller `intro_sort` uses this name).
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # shift larger elements one slot right until the insertion point
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array, index, heap_size):  # Max Heap
    """Sift array[index] down so the subtree rooted there is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array):
    """In-place heap sort; return the array."""
    n = len(array)
    # build a max heap bottom-up
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # repeatedly move the max to the end and re-heapify the shrinking prefix
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_a(array, first_index, middle_index, last_index):
    """Return the median of the three indexed elements (pivot selection)."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array, low, high, pivot):
    """Hoare-style partition of array[low:high] around `pivot`; return the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array):
    """Introsort entry point: quicksort with a heap-sort depth fallback and
    insertion sort for small runs; returns the (in-place sorted) array."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort body: quicksort until depth runs out, then heap sort;
    small ranges are finished by insertion sort."""
    while end - start > size_threshold:
        if max_depth == 0:  # recursion too deep: fall back to heap sort
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p  # continue on the left part
    return insertion_sort(array, start, end)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Restores the `user_input` / `unsorted` bindings that the degraded
    # source lost (the comprehension read an undefined name).
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 711 |
def solution(n=1000):
    """Return the largest product a*b*c of a Pythagorean triplet with a + b + c == n.

    Returns -1 when no such triplet exists.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
    # Print the maximised product for the default n = 1000.
    print(F'{solution() = }')
| 633 | 0 |
def solution(n=1000):
    """Return the sum of 2*a*floor((a-1)/2) for a in [3, n] (closed-form per-term count)."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
    # Print the answer for the default upper bound.
    print(solution())
| 712 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import layout for the InstructBLIP package: submodule names are
# resolved on first attribute access via _LazyModule. Restores the
# `_import_structure` binding the degraded source lost.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: also expose the modeling classes
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase(PreTrainedModel):
    """Safety checker: flags NSFW and watermarked images via CLIP image embeddings
    and two linear score heads, blacking out any flagged image.

    Fixes the degraded class attributes (both were assigned to `lowercase_`, so
    `config_class` was clobbered) and the duplicate `A__` parameters.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)  # NSFW score
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)  # watermark score

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)  # black out flagged image

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 713 |
from numpy import exp, pi, sqrt
def gaussian(x, mu=0.0, sigma=1.0):
    """Gaussian (normal) probability density at `x` with mean `mu` and std `sigma`.

    Fixes the degraded signature (three duplicate parameter names) so that the
    body's x/mu/sigma references resolve.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
    # Verify the module's doctests when run as a script.
    import doctest
    doctest.testmod()
| 633 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
lowercase__ :Dict = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class lowercase(PretrainedConfig):
    """Configuration class for XLNet models: stores architecture hyper-parameters.

    Restores the XLNet constructor parameter names (the degraded source declared
    28 duplicate `A__` parameters) and the `max_position_embeddings` property
    name that its own setter decorator references.
    """

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Store the hyper-parameters; validates d_model divisibility by n_head."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})'
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # XLNet has no fixed sequence length limit, so report -1.
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 714 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read boolean flag `key` from the environment; fall back to `default` when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
# Module-level flag read by the `slow` decorator below.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Unconditionally skip a test."""
    return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
    """Skip unless the RUN_SLOW environment flag is set (see _run_slow_tests)."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    """Run only when no CUDA device is available."""
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
def require_cuda(test_case):
    """Skip unless a CUDA device is available."""
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
    """Skip unless an Intel XPU device is available."""
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_mps(test_case):
    """Skip unless torch has a working `mps` backend."""
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
    """Skip unless both `transformers` and `datasets` are installed."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)
def require_bnb(test_case):
    """Skip unless `bitsandbytes` is installed."""
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
    """Skip unless a TPU is available."""
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
def require_single_gpu(test_case):
    """Skip unless exactly one CUDA device is available."""
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
    """Skip unless exactly one XPU device is available."""
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_gpu(test_case):
    """Skip unless more than one CUDA device is available."""
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
    """Skip unless more than one XPU device is available."""
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_safetensors(test_case):
    """Skip unless `safetensors` is installed."""
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    """Skip unless DeepSpeed is installed."""
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_fsdp(test_case):
    """Skip unless torch >= 1.12.0 (FSDP support)."""
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
    """Skip unless torch >= `version`; usable bare or as `@require_torch_min_version(version=...)`.

    Fixes the degraded duplicate parameter names; when called with only
    `version`, returns a partially-applied decorator.
    """
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    """Skip unless TensorBoard is installed."""
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
    """Skip unless Weights & Biases is installed."""
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
    """Skip unless comet_ml is installed."""
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
# True when wandb or tensorboard is installed and comet_ml is not; read by
# `require_trackers` below (the degraded source never bound this name).
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    """Skip unless at least one tracker is installed and comet_ml is not."""
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class lowercase(unittest.TestCase):
    """TestCase with a class-scoped temporary directory, optionally emptied per test.

    Restores `clear_on_setup` and `cls.tmpdir` — the degraded source assigned
    them to throwaway names while `setUp`/`tearDownClass` read the real ones.
    """

    # When True, the tmpdir's contents are removed in setUp before every test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create the shared temporary directory."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Delete the shared temporary directory."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Empty the tmpdir (files and subdirectories) if clear_on_setup is set."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class lowercase(unittest.TestCase):
    """TestCase that resets the accelerate state singletons after each test."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowercase(unittest.TestCase):
    """TestCase that starts a set of mock patchers and stops them on cleanup."""

    def setUp(self, mocks):
        """Register `mocks` (one patcher or a tuple/list of them), start each,
        and schedule its stop() as a cleanup."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Return True when `tensor` holds identical values on every distributed process."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class lowercase :
def __init__( self ,A__ ,A__ ,A__):
lowercase = returncode
lowercase = stdout
lowercase = stderr
async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
while True:
lowercase = await stream.readline()
if line:
callback(lowerCAmelCase__ )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run `cmd`, teeing its stdout/stderr to the parent streams while capturing them."""
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Run `cmd` via the asyncio helper; raise RuntimeError on a nonzero exit."""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class SubprocessCallException(Exception):
    """Raised by run_command when the spawned process exits with an error.

    Renamed from the degraded `lowercase` — run_command below raises it by this name.
    """

    pass
def run_command(command, return_stdout=False):
    """Run `command` (an argv list); optionally return its decoded stdout.

    Raises SubprocessCallException carrying the captured combined output on failure.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 633 | 0 |
import logging
from transformers import PretrainedConfig
lowercase__ :int = logging.getLogger(__name__)
lowercase__ :Dict = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase(PretrainedConfig):
    """Configuration for the BertAbs extractive/abstractive summarization model.

    Restores the constructor parameter names (the degraded source declared
    twelve duplicate `A__` parameters that the body then read by real name).
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 715 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase(SeqaSeqTrainer):
    """Seq2Seq QA trainer: evaluation/prediction with post-processing hooks.

    Restores the assignment targets (self.* attributes, gen_kwargs keys, metric
    renames) that the degraded source collapsed into `lowercase = ...`.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
        """Run evaluation, post-process predictions, and return prefixed metrics."""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        """Run prediction, post-process, and return a PredictionOutput with metrics."""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 633 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowercase(unittest.TestCase):
    """Launches the accelerate metric-calculation test script on CPU/GPU configs.

    Restores `self.test_file_path` / `self.test_metrics` (the degraded source
    assigned them to throwaway names while the test methods read the real ones).
    """

    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 716 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
# Help text shown by `accelerate tpu-config --help`; read as `_description` below.
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    """Build the argparse parser for `accelerate tpu-config`.

    Restores the `parser`/`config_args`/`pod_args` bindings that the degraded
    source assigned to throwaway names while later lines read the real ones.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Launch the configured command on a TPU pod via `gcloud compute tpus tpu-vm ssh`.

    NOTE(review): throughout this function results are bound to the throwaway
    name `lowercase` but read back under other names (`defaults`, `new_cmd`,
    `cmd`, ...); the dataflow as written is broken and presumably mirrors
    accelerate's `tpu_command_launcher(args)` -- confirm against upstream.
    '''
    lowercase = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(lowerCAmelCase__ ):
        lowercase = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            lowercase = defaults.command_file
        if not args.command and defaults.commands is not None:
            lowercase = defaults.commands
        if not args.tpu_name:
            lowercase = defaults.tpu_name
        if not args.tpu_zone:
            lowercase = defaults.tpu_zone
    # Resolve the accelerate version spec to a pip-installable string.
    if args.accelerate_version == "dev":
        lowercase = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        lowercase = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , lowerCAmelCase__ ):
        lowercase = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            lowercase = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , lowerCAmelCase__ ):
        lowercase = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    lowercase = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    lowercase = '''; '''.join(lowerCAmelCase__ )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    lowercase = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    # --debug prints the command instead of executing it.
    if args.debug:
        print(f'Running {" ".join(lowerCAmelCase__ )}' )
        return
    subprocess.run(lowerCAmelCase__ )
    print('''Successfully setup pod.''' )
def UpperCamelCase ( ):
    '''CLI entry point: build the tpu-config parser, parse argv, launch.

    NOTE(review): `parser` and `lowerCAmelCase__` are read here but never
    bound; presumably the two assignments were meant to bind them -- confirm.
    '''
    lowercase = tpu_command_parser()
    lowercase = parser.parse_args()
    tpu_command_launcher(lowerCAmelCase__ )
| 633 | 0 |
import gc
import threading
import time
import psutil
import torch
class lowercase :
    '''Background sampler that records the peak CPU RSS of this process.

    NOTE(review): all three methods below share the name `A__`, so only the
    last definition survives on the class; upstream accelerate names them
    `peak_monitor` / `start` / `stop` on `PeakCPUMemory` -- confirm.
    '''
    def __init__( self):
        # Presumably meant to bind `self.process` / `self.peak_monitoring`,
        # which the methods below read; as written these are discarded locals.
        lowercase = psutil.Process()
        lowercase = False
    def A__ ( self):
        # Busy-loop sampling RSS to catch the peak between start() and stop().
        lowercase = -1
        while True:
            lowercase = max(self.process.memory_info().rss ,self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def A__ ( self):
        # Start the monitor thread (upstream marks it as a daemon thread).
        lowercase = True
        lowercase = threading.Thread(target=self.peak_monitor)
        lowercase = True
        self.thread.start()
    def A__ ( self):
        # Stop monitoring and return the observed peak RSS in bytes.
        lowercase = False
        self.thread.join()
        return self.cpu_memory_peak
lowercase__ :List[Any] = PeakCPUMemory()
def UpperCamelCase ( ):
    '''Snapshot wall-clock time, CPU RSS and per-GPU allocated memory.

    NOTE(review): each snapshot is bound to the throwaway name `lowercase`
    but the function returns `measures`; upstream builds a dict keyed by
    "time", "cpu" and str(gpu_index) -- confirm against upstream.
    '''
    lowercase = {'''time''': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    lowercase = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        lowercase = torch.cuda.memory_allocated(lowerCAmelCase__ )
        torch.cuda.reset_peak_memory_stats()
    return measures
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Compute deltas in MiB (and elapsed time) against a start snapshot.

    NOTE(review): same obfuscation issue as the snapshot helper above --
    results go to `lowercase` while the returned `measures` dict is never
    populated here; confirm against upstream accelerate.
    '''
    lowercase = {'''time''': time.time() - start_measures['''time''']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    lowercase = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
    lowercase = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        lowercase = (torch.cuda.memory_allocated(lowerCAmelCase__ ) - start_measures[str(lowerCAmelCase__ )]) / 2**20
        lowercase = (torch.cuda.max_memory_allocated(lowerCAmelCase__ ) - start_measures[str(lowerCAmelCase__ )]) / 2**20
    return measures
def UpperCamelCase ( measures , description ):
    '''Pretty-print a measurement dict produced by the snapshot helpers.

    Fixes applied to the original block: both parameters were named
    `lowerCAmelCase__` (a SyntaxError) while the body already read `measures`
    and `description`; the per-GPU peak was bound to a throwaway name but
    printed as `peak`; and the allocated-memory key used the function
    argument instead of `str(i)`.

    Args:
        measures: dict with keys "time", "cpu", "cpu-peak" and, per GPU `i`,
            `str(i)` and `f"{i}-peak"` (values in MiB except "time").
        description: label printed as a heading.
    '''
    print(f'{description}:' )
    print(f'- Time: {measures["time"]:.2f}s' )
    for i in range(torch.cuda.device_count() ):
        print(f'- GPU {i} allocated: {measures[str(i)]:.2f}MiB' )
        peak = measures[f'{i}-peak']
        print(f'- GPU {i} peak: {peak:.2f}MiB' )
    print(f'- CPU RAM allocated: {measures["cpu"]:.2f}MiB' )
    print(f'- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB' )
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Regex matching any character that is not part of an identifier token.
# Fix: the helpers below read NON_ALPHA / MIN_NUM_TOKENS / NUM_PERM, but the
# original bound all three values to the same shadowed name `lowercase__`.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10  # files with fewer tokens are skipped by get_min_hash
NUM_PERM = 256  # number of MinHash permutations
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Compute a MinHash signature over a collection of tokens.

    Returns None when fewer than MIN_NUM_TOKENS tokens are provided.
    NOTE(review): upstream passes `num_perm=NUM_PERM`; here the token
    collection itself is passed as `num_perm`, and the result is bound to a
    throwaway name while `min_hash` is read -- confirm against upstream.
    '''
    if len(lowerCAmelCase__ ) < MIN_NUM_TOKENS:
        return None
    lowercase = MinHash(num_perm=lowerCAmelCase__ )
    for token in set(lowerCAmelCase__ ):
        min_hash.update(token.encode() )
    return min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Split a code string on non-identifier characters and return the set of
    non-empty tokens.'''
    pieces = NON_ALPHA.split(lowerCAmelCase__ )
    return {piece for piece in pieces if piece.strip()}
class lowercase :
    '''Near-duplicate index backed by MinHash LSH.

    NOTE(review): upstream name is `DuplicationIndex`; the three `def A__`
    methods below share one name (add / get_duplicate_clusters / save) and
    the first even repeats a parameter name (a SyntaxError) -- confirm.
    '''
    def __init__( self ,*,
    A__ = 0.85 ,):
        # Jaccard threshold above which two files count as near-duplicates.
        lowercase = duplication_jaccard_threshold
        lowercase = NUM_PERM
        lowercase = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm)
        # Maps a base file key -> set of keys considered duplicates of it.
        lowercase = defaultdict(A__)
    def A__ ( self ,A__ ,A__):
        # Insert (code_key, min_hash) into the LSH index and attach the key to
        # an existing duplicate cluster when one of its neighbours has one.
        lowercase = self._index.query(A__)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(A__ ,A__)
        if len(A__) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(A__)
                    break
            else:
                # No neighbour owns a cluster yet: seed one on the first hit.
                self._duplicate_clusters[close_duplicates[0]].add(A__)
    def A__ ( self):
        # Materialize clusters as lists of {base_index, repo_name, path} dicts.
        lowercase = []
        for base, duplicates in self._duplicate_clusters.items():
            lowercase = [base] + list(A__)
            # reformat the cluster to be a list of dict
            lowercase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(A__)
        return duplicate_clusters
    def A__ ( self ,A__):
        # Dump the clusters to a JSON file.
        lowercase = self.get_duplicate_clusters()
        with open(A__ ,'''w''') as f:
            json.dump(A__ ,A__)
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Pool worker: compute the MinHash for one (index, row) pair.

    Returns ((index, repo_name, path), min_hash), or None for short files.
    NOTE(review): the tuple unpack binds both halves to the same throwaway
    name while `data`/`index`/`min_hash` are read -- confirm upstream.
    '''
    lowercase , lowercase = element
    lowercase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
    '''Yield (key, MinHash) pairs for a dataset iterator using a process pool,
    skipping files for which no MinHash was computed.'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(lowerCAmelCase__ , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Build duplicate clusters for a dataset at a given Jaccard threshold.

    NOTE(review): both parameters share one name (a SyntaxError as written);
    upstream signature is (dataset, jaccard_threshold) -- confirm.
    '''
    lowercase = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase__ )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase__ ) ) , max_queue_size=100 ) ):
        di.add(lowerCAmelCase__ , lowerCAmelCase__ )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def UpperCamelCase ( code_a , code_b ):
    '''Return the Jaccard similarity between the token sets of two code strings.

    Fixes applied to the original block: both parameters were named
    `lowerCAmelCase__` (a SyntaxError) and both token sets were bound to the
    same throwaway name while undefined names were read in the return.

    Args:
        code_a: first source string.
        code_b: second source string.

    Returns:
        |A ∩ B| / |A ∪ B| over the two token sets. Callers must not pass two
        strings whose token sets are both empty (division by zero).
    '''
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
lowercase__ :List[Any] = None
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Select "extreme" (representative) elements of one duplicate cluster,
    counting how many cluster members each representative covers.

    NOTE(review): duplicate parameter names are a SyntaxError as written;
    upstream signature is (cluster, jaccard_threshold), and the two loop
    variables are distinct (`element1`, `element2`) -- confirm.
    '''
    lowercase = []
    for elementa in cluster:
        lowercase = _shared_dataset[elementa['''base_index''']]['''content''']
        for elementa in extremes:
            lowercase = _shared_dataset[elementa['''base_index''']]['''content''']
            if jaccard_similarity(lowerCAmelCase__ , lowerCAmelCase__ ) >= jaccard_threshold:
                elementa["copies"] += 1
                break
        else:
            # No existing representative is similar enough: start a new one.
            lowercase = 1
            extremes.append(lowerCAmelCase__ )
    return extremes
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Find cluster extremes for every cluster using a process pool.

    The dataset is published through the module-global `_shared_dataset` so
    pool workers can read it without pickling it per task.
    '''
    global _shared_dataset
    lowercase = dataset
    lowercase = []
    lowercase = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase__ )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                lowerCAmelCase__ , lowerCAmelCase__ , ) , total=len(lowerCAmelCase__ ) , ):
            extremes_list.append(lowerCAmelCase__ )
    return extremes_list
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 0.85 ):
    '''Deduplicate a dataset: cluster near-duplicates, keep "extreme"
    representatives, filter out the rest, and annotate the clusters.

    Returns (filtered_dataset, duplicate_clusters).
    NOTE(review): intermediate results are bound to `lowercase` while read
    back under upstream names (`duplicate_clusters`, `extreme_dict`, ...);
    confirm against upstream before relying on this implementation.
    '''
    lowercase = make_duplicate_clusters(lowerCAmelCase__ , lowerCAmelCase__ )
    lowercase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    lowercase = {}
    lowercase = find_extremes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    for extremes in extremes_clusters:
        for element in extremes:
            lowercase = element
    lowercase = duplicate_indices - set(extreme_dict.keys() )
    lowercase = dataset.filter(lambda lowerCAmelCase__ , lowerCAmelCase__ : idx not in remove_indices , with_indices=lowerCAmelCase__ )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            lowercase = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                lowercase = extreme_dict[element['''base_index''']]['''copies''']
    print(f'Original dataset size: {len(lowerCAmelCase__ )}' )
    print(f'Number of duplicate clusters: {len(lowerCAmelCase__ )}' )
    print(f'Files in duplicate cluster: {len(lowerCAmelCase__ )}' )
    print(f'Unique files in duplicate cluster: {len(lowerCAmelCase__ )}' )
    print(f'Filtered dataset size: {len(lowerCAmelCase__ )}' )
    return ds_filter, duplicate_clusters
| 633 | 0 |
from collections.abc import Callable
class lowercase :
    '''Min-heap keyed by an arbitrary function, with a position map that
    supports O(log n) update and delete of arbitrary items.

    NOTE(review): every method below is named `A__`, so later definitions
    shadow earlier ones and the class is unusable as written; upstream
    gives each method its own name (`_parent`, `_left`, `_right`, `_swap`,
    `_cmp`, `_get_valid_parent`, `_heapify_up`, `_heapify_down`,
    `update_item`, `delete_item`, `insert_item`, `get_top`,
    `extract_top`) -- confirm.
    '''
    def __init__( self ,A__ = None):
        # Stores actual heap items.
        lowercase = []
        # Stores indexes of each item for supporting updates and deletion.
        lowercase = {}
        # Stores current size of heap.
        lowercase = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        lowercase = key or (lambda A__: x)
    def A__ ( self ,A__):
        # Index of the parent slot, or None for the root.
        return int((i - 1) / 2) if i > 0 else None
    def A__ ( self ,A__):
        # Index of the left child, or None when out of range.
        lowercase = int(2 * i + 1)
        return left if 0 < left < self.size else None
    def A__ ( self ,A__):
        # Index of the right child, or None when out of range.
        lowercase = int(2 * i + 2)
        return right if 0 < right < self.size else None
    def A__ ( self ,A__ ,A__):
        # Swap two slots, keeping the position map consistent.
        lowercase , lowercase = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        lowercase , lowercase = self.arr[j], self.arr[i]
    def A__ ( self ,A__ ,A__):
        # True when slot i's key orders before slot j's key.
        return self.arr[i][1] < self.arr[j][1]
    def A__ ( self ,A__):
        # Among i and its children, return the slot that should be on top.
        lowercase = self._left(A__)
        lowercase = self._right(A__)
        lowercase = i
        if left is not None and not self._cmp(A__ ,A__):
            lowercase = left
        if right is not None and not self._cmp(A__ ,A__):
            lowercase = right
        return valid_parent
    def A__ ( self ,A__):
        # Sift a slot up while it orders before its parent.
        lowercase = self._parent(A__)
        while parent is not None and not self._cmp(A__ ,A__):
            self._swap(A__ ,A__)
            lowercase , lowercase = parent, self._parent(A__)
    def A__ ( self ,A__):
        # Sift a slot down toward the child that should be on top.
        lowercase = self._get_valid_parent(A__)
        while valid_parent != index:
            self._swap(A__ ,A__)
            lowercase , lowercase = valid_parent, self._get_valid_parent(A__)
    def A__ ( self ,A__ ,A__):
        # Update an existing item's value and restore the heap property.
        if item not in self.pos_map:
            return
        lowercase = self.pos_map[item]
        lowercase = [item, self.key(A__)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(A__)
        self._heapify_down(A__)
    def A__ ( self ,A__):
        # Delete an item by moving the last slot into its place.
        if item not in self.pos_map:
            return
        lowercase = self.pos_map[item]
        del self.pos_map[item]
        lowercase = self.arr[self.size - 1]
        lowercase = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(A__)
            self._heapify_down(A__)
    def A__ ( self ,A__ ,A__):
        # Insert a new item, reusing a free trailing slot when available.
        lowercase = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(A__)])
        else:
            lowercase = [item, self.key(A__)]
        lowercase = self.size
        self.size += 1
        self._heapify_up(self.size - 1)
    def A__ ( self):
        # Peek at the top [item, key] pair without removing it.
        return self.arr[0] if self.size else None
    def A__ ( self):
        # Pop and return the top [item, key] pair, or None when empty.
        lowercase = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def UpperCamelCase ( ):
    '''Run the module doctests when this file is executed directly.'''
    if __name__ == "__main__":
        import doctest
        doctest.testmod()
| 718 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Safety checker for IF pipelines: zeroes out images flagged as NSFW or
    watermarked by two linear heads on top of CLIP image embeddings.

    Fixes applied to the original block:
    * ``__init__`` bound its sub-modules to discarded locals while the
      forward pass reads ``self.vision_model`` / ``self.p_head`` /
      ``self.w_head``.
    * the forward method declared four parameters all named ``A__`` (a
      SyntaxError); they are renamed to descriptive names (positional
      callers are unaffected).
    """

    lowercase_ : Union[str, Any] =CLIPConfig
    lowercase_ : str =['''CLIPEncoderLayer''']

    def __init__( self ,A__):
        super().__init__(A__)
        # Sub-modules read by the forward pass below.
        self.vision_model = CLIPVisionModelWithProjection(A__.vision_config)
        self.p_head = nn.Linear(A__.vision_config.projection_dim ,1)
        self.w_head = nn.Linear(A__.vision_config.projection_dim ,1)

    @torch.no_grad()
    def A__ ( self ,clip_input ,images ,p_threshold=0.5 ,w_threshold=0.5):
        # Shared CLIP image embedding for both heads.
        image_embeds = self.vision_model(clip_input)[0]

        # NSFW head: flag images whose score exceeds the threshold.
        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        # Watermark head: same treatment.
        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 633 | 0 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def UpperCamelCase ( lowerCAmelCase__=32 , lowerCAmelCase__=10 , lowerCAmelCase__=100 , lowerCAmelCase__=1026 , lowerCAmelCase__=True , lowerCAmelCase__="data/tokenized_stories_train_wikitext103.jbl" , lowerCAmelCase__="igf_context_pairs.jbl" , ):
    '''Collect (context, information-gain) pairs for training the IGF
    secondary learner and dump them to the igf data file.

    NOTE(review): all parameters share one obfuscated name (a SyntaxError
    as written); upstream signature is (context_len, max_steps,
    size_objective_set, min_len, trim, data_file, igf_data_file) -- confirm.
    '''
    set_seed(3 )
    # generate train_data and objective_set
    lowercase , lowercase = generate_datasets(
        lowerCAmelCase__ , lowerCAmelCase__ , number=lowerCAmelCase__ , min_len=1026 , trim=lowerCAmelCase__ )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    lowercase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    # load pretrained model
    lowercase = load_gpta('''gpt2''' ).to(lowerCAmelCase__ )
    print('''computing perplexity on objective set''' )
    lowercase = compute_perplexity(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).item()
    print('''perplexity on objective set:''' , lowerCAmelCase__ )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=15 , lowerCAmelCase__=128 , lowerCAmelCase__=100 , lowerCAmelCase__="igf_model.pt" , ):
    '''Train the IGF secondary learner on pre-collected (context, IG) pairs.

    NOTE(review): duplicate parameter names make this a SyntaxError as
    written; upstream signature is (secondary_learner_train_data, max_epochs,
    batch_size, eval_freq, igf_model_path) -- confirm.
    '''
    set_seed(42 )
    # Load pre-trained model
    lowercase = GPTaLMHeadModel.from_pretrained('''gpt2''' )
    # Initialize secondary learner to use embedding weights of model
    lowercase = SecondaryLearner(lowerCAmelCase__ )
    # Train secondary learner
    lowercase = train_secondary_learner(
        lowerCAmelCase__ , lowerCAmelCase__ , max_epochs=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , eval_freq=100 , igf_model_path=lowerCAmelCase__ , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=32 , lowerCAmelCase__=1000 , lowerCAmelCase__=16 , lowerCAmelCase__=1.0 , lowerCAmelCase__=recopy_gpta , lowerCAmelCase__=None , lowerCAmelCase__=10 , lowerCAmelCase__="gpt2_finetuned.pt" , ):
    '''Fine-tune GPT-2, optionally filtering training contexts through the
    IGF secondary learner, and save the fine-tuned weights.

    NOTE(review): the parameter list repeats one obfuscated name (a
    SyntaxError as written); upstream signature is (model, train_dataset,
    test_dataset, context_len, max_steps, batch_size, threshold,
    recopy_model, secondary_learner, eval_interval, finetuned_model_name).
    Results throughout are bound to the throwaway name `lowercase` while
    read back under upstream names -- confirm against upstream.
    '''
    lowercase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    lowercase = RandomSampler(lowerCAmelCase__ )
    lowercase = DataLoader(lowerCAmelCase__ , sampler=lowerCAmelCase__ )
    lowercase = max_steps // (len(lowerCAmelCase__ )) + 1
    lowercase = 0
    lowercase = torch.zeros((1, context_len) , dtype=torch.long , device=lowerCAmelCase__ )
    lowercase , lowercase , lowercase = recopy_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(lowerCAmelCase__ )
        secondary_learner.eval()
    lowercase = []
    lowercase = 0
    lowercase = []
    lowercase = []
    # Compute the performance of the transformer model at the beginning
    lowercase = compute_perplexity(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    test_perps.append(lowerCAmelCase__ )
    print('''Test perplexity, step''' , lowerCAmelCase__ , ''':''' , lowerCAmelCase__ )
    for epoch in range(int(lowerCAmelCase__ ) ):
        for step, example in enumerate(lowerCAmelCase__ ):
            torch.cuda.empty_cache()
            # Sample a random context window from the example.
            lowercase = random.randint(0 , example.size(2 ) - context_len - 1 )
            lowercase = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            lowercase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
            lowercase = True
            if secondary_learner is not None:
                lowercase = secondary_learner.forward(
                    torch.tensor(lowerCAmelCase__ , dtype=torch.long , device=lowerCAmelCase__ ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(lowerCAmelCase__ ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    lowercase = -1
                if predicted_q < threshold:
                    lowercase = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lowercase = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                lowercase = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step() # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    lowercase = compute_perplexity(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
                    test_perps.append(lowerCAmelCase__ )
                    print('''Test perplexity, step''' , lowerCAmelCase__ , ''':''' , lowerCAmelCase__ )
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , lowerCAmelCase__ )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def UpperCamelCase ( ):
    '''Script entry point: build the CLI, collect IGF pairs, train the
    secondary learner, then fine-tune GPT-2 with IGF filtering.

    NOTE(review): the parsed args are never used below -- the function calls
    hard-code the same values the CLI declares; this mirrors the upstream
    IGF example script.
    '''
    lowercase = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''The input data dir. Should contain data files for WikiText.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--data_file''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help=(
            '''A jbl file containing tokenized data which can be split as objective dataset, '''
            '''train_dataset and test_dataset.'''
        ) , )
    parser.add_argument(
        '''--igf_data_file''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
    parser.add_argument(
        '''--output_dir''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''The output directory where the final fine-tuned model is stored.''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
    parser.add_argument('''--seed''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''A seed for reproducible training.''' )
    parser.add_argument(
        '''--context_len''' , default=32 , type=lowerCAmelCase__ , help=(
            '''The maximum total input sequence length after tokenization. Sequences longer '''
            '''than this will be truncated, sequences shorter will be padded.'''
        ) , )
    parser.add_argument(
        '''--size_objective_set''' , default=100 , type=lowerCAmelCase__ , help='''number of articles that are long enough to be used as our objective set''' , )
    parser.add_argument(
        '''--eval_freq''' , default=100 , type=lowerCAmelCase__ , help='''secondary model evaluation is triggered at eval_freq''' )
    parser.add_argument('''--max_steps''' , default=1000 , type=lowerCAmelCase__ , help='''To calculate training epochs''' )
    parser.add_argument(
        '''--secondary_learner_batch_size''' , default=128 , type=lowerCAmelCase__ , help='''batch size of training data for secondary learner''' , )
    parser.add_argument(
        '''--batch_size''' , default=16 , type=lowerCAmelCase__ , help='''batch size of training data of language model(gpt2) ''' )
    parser.add_argument(
        '''--eval_interval''' , default=10 , type=lowerCAmelCase__ , help=(
            '''decay the selectivity of our secondary learner filter from'''
            '''1 standard deviation above average to 1 below average after 10 batches'''
        ) , )
    parser.add_argument(
        '''--number''' , default=100 , type=lowerCAmelCase__ , help='''The number of examples split to be used as objective_set/test_data''' )
    parser.add_argument(
        '''--min_len''' , default=1026 , type=lowerCAmelCase__ , help='''The minimum length of the article to be used as objective set''' )
    parser.add_argument(
        '''--secondary_learner_max_epochs''' , default=15 , type=lowerCAmelCase__ , help='''number of epochs to train secondary learner''' )
    parser.add_argument('''--trim''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''truncate the example if it exceeds context length''' )
    parser.add_argument(
        '''--threshold''' , default=1.0 , type=lowerCAmelCase__ , help=(
            '''The threshold value used by secondary learner to filter the train_data and allow only'''
            ''' informative data as input to the model'''
        ) , )
    parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=lowerCAmelCase__ , help='''finetuned_model_name''' )
    parser.add_argument(
        '''--recopy_model''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=lowerCAmelCase__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
    # Load train data for secondary learner
    lowercase = joblib.load('''data/IGF_values.jbl''' )
    # Train secondary learner
    lowercase = training_secondary_learner(
        lowerCAmelCase__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
    # load pretrained gpt2 model
    lowercase = GPTaLMHeadModel.from_pretrained('''gpt2''' )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    lowercase , lowercase = generate_datasets(
        context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1026 , trim=lowerCAmelCase__ )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=lowerCAmelCase__ , secondary_learner=lowerCAmelCase__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
| 719 |
class Node:
    """Binary-search-tree node used by tree_sort.

    Fixes applied to the original block: child links were assigned to
    discarded locals (so the tree was never built), the recursion already
    called ``Node(...)`` / ``self.left.insert(...)`` which did not exist
    under the obfuscated names, and the second module function repeated a
    parameter name (a SyntaxError). The names restored here are exactly the
    ones the original code called (``Node``, ``insert``, ``inorder``,
    ``tree_sort``).
    """

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert *val* into the subtree rooted at this node.

        Duplicates are silently dropped; a falsy root value (e.g. 0) is
        overwritten — quirk preserved from the original ``if self.val:``
        guard (NOTE(review): upstream has the same behavior).
        """
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Append the subtree's values to *res* in sorted (in-order) order."""
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort *arr* by building a BST and reading it back in order.

    Returns a new sorted list (duplicates removed — see Node.insert);
    an empty input is returned unchanged.
    """
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 633 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # Fix: the fallback bound None to an unused alias, leaving the name
    # `MBartTokenizer` (read by the fast tokenizer's class attributes below)
    # undefined when sentencepiece is missing.
    MBartTokenizer = None

# Fix: the tokenizer class below calls `logger.error(...)`; the original
# bound the logger to `lowercase__` instead.
logger = logging.get_logger(__name__)
# Fix: the tokenizer class below reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# FAIRSEQ_LANGUAGE_CODES, but the originals bound all four literals to the
# same shadowed name `lowercase__`.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    '''Fast MBart tokenizer (backed by HuggingFace *tokenizers*).

    Adds language-code handling on top of the fast tokenizer: the source
    language code plus </s> are appended as suffix special tokens.
    NOTE(review): most methods below share the name `A__`, so later
    definitions shadow earlier ones and the decorated property/setter pair
    cannot work as written; upstream is `MBartTokenizerFast` -- confirm.
    '''
    lowercase_ : Tuple =VOCAB_FILES_NAMES
    lowercase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
    lowercase_ : Optional[Any] =['''input_ids''', '''attention_mask''']
    lowercase_ : Optional[int] =MBartTokenizer
    lowercase_ : List[int] =[]
    lowercase_ : List[int] =[]
    def __init__( self ,A__=None ,A__=None ,A__="<s>" ,A__="</s>" ,A__="</s>" ,A__="<s>" ,A__="<unk>" ,A__="<pad>" ,A__="<mask>" ,A__=None ,A__=None ,A__=None ,**A__ ,):
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase = AddedToken(A__ ,lstrip=A__ ,rstrip=A__) if isinstance(A__ ,A__) else mask_token
        super().__init__(
            vocab_file=A__ ,tokenizer_file=A__ ,bos_token=A__ ,eos_token=A__ ,sep_token=A__ ,cls_token=A__ ,unk_token=A__ ,pad_token=A__ ,mask_token=A__ ,src_lang=A__ ,tgt_lang=A__ ,additional_special_tokens=A__ ,**A__ ,)
        lowercase = vocab_file
        lowercase = False if not self.vocab_file else True
        lowercase = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens})
        # Map each FAIRSEQ language code to its token id.
        lowercase = {
            lang_code: self.convert_tokens_to_ids(A__) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        lowercase = src_lang if src_lang is not None else '''en_XX'''
        lowercase = self.convert_tokens_to_ids(self._src_lang)
        lowercase = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def A__ ( self):
        # Current source language code.
        return self._src_lang
    @src_lang.setter
    def A__ ( self ,A__):
        # Switch source language and refresh the special-token template.
        lowercase = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def A__ ( self ,A__ ,A__ = None):
        # build_inputs_with_special_tokens: wrap ids in prefix/suffix tokens.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def A__ ( self ,A__ ,A__ = None):
        # create_token_type_ids_from_sequences: MBart does not use token types,
        # so everything is zero.
        lowercase = [self.sep_token_id]
        lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
    def A__ ( self ,A__ ,A__ ,A__ ,A__ ,**A__):
        # _build_translation_inputs: tokenize with forced BOS for the target.
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
        lowercase = src_lang
        lowercase = self(A__ ,add_special_tokens=A__ ,return_tensors=A__ ,**A__)
        lowercase = self.convert_tokens_to_ids(A__)
        lowercase = tgt_lang_id
        return inputs
    def A__ ( self ,A__ ,A__ = "en_XX" ,A__ = None ,A__ = "ro_RO" ,**A__ ,):
        # prepare_seq2seq_batch: record languages, defer to the base class.
        lowercase = src_lang
        lowercase = tgt_lang
        return super().prepare_seqaseq_batch(A__ ,A__ ,**A__)
    def A__ ( self):
        # _switch_to_input_mode
        return self.set_src_lang_special_tokens(self.src_lang)
    def A__ ( self):
        # _switch_to_target_mode
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def A__ ( self ,A__):
        # set_src_lang_special_tokens: suffix = [</s>, src_lang_code].
        lowercase = self.convert_tokens_to_ids(A__)
        lowercase = []
        lowercase = [self.eos_token_id, self.cur_lang_code]
        lowercase = self.convert_ids_to_tokens(self.prefix_tokens)
        lowercase = self.convert_ids_to_tokens(self.suffix_tokens)
        lowercase = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str ,pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens)) ,)
    def A__ ( self ,A__):
        # set_tgt_lang_special_tokens: suffix = [</s>, tgt_lang_code].
        lowercase = self.convert_tokens_to_ids(A__)
        lowercase = []
        lowercase = [self.eos_token_id, self.cur_lang_code]
        lowercase = self.convert_ids_to_tokens(self.prefix_tokens)
        lowercase = self.convert_ids_to_tokens(self.suffix_tokens)
        lowercase = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str ,pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens)) ,)
    def A__ ( self ,A__ ,A__ = None):
        # save_vocabulary: copy the sentencepiece model file next to the
        # requested save directory.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''')
        if not os.path.isdir(A__):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
            return
        lowercase = os.path.join(
            A__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(A__):
            copyfile(self.vocab_file ,A__)
        return (out_vocab_file,)
| 720 |
import os
def UpperCamelCase(lowerCAmelCase__="input.txt"):
    """Project Euler 82: minimal path sum through a matrix, moving right, up
    or down, starting in any cell of the first column and ending in any cell
    of the last column.

    The matrix is read from ``lowerCAmelCase__`` (comma-separated ints per
    line) resolved relative to this module.  All distinct locals in the
    original had been collapsed to one name, leaving later references
    (``matrix``, ``rows``, ``minimal_path_sums``) unbound.
    """
    with open(os.path.join(os.path.dirname(__file__), lowerCAmelCase__)) as input_file:
        matrix = [
            [int(element) for element in line.split(',')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    # Column 0: the path sum is just the starting cell.
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # Enter column j from the left...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ...then relax moving downwards...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        # ...and moving upwards.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
    # `solution` does not exist in this module — the solver defined above is
    # `UpperCamelCase` (the name was mangled), so call that.
    print(f'{UpperCamelCase() = }')
| 633 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Map of submodule name -> public symbols, consumed lazily by _LazyModule below.
# The original assigned this dict (and the optional modeling list) to throwaway
# variables, so the `_import_structure` reference at the bottom raised NameError
# at import time.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch absent: the modeling submodule is simply not registered.
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that performs the
    # imports on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 721 |
from __future__ import annotations
def UpperCamelCase(lowerCAmelCase__):
    """Return True iff the side lengths in ``lowerCAmelCase__`` can form a
    polygon, i.e. the longest side is strictly shorter than the sum of all
    the others.

    Raises:
        ValueError: if fewer than three sides are given (monogons and digons
            are not polygons) or if any side is non-positive.
    """
    # A polygon needs at least 3 sides.  The original `< 2` check let digons
    # (two sides) through, contradicting the error message below.
    if len(lowerCAmelCase__) < 3:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in lowerCAmelCase__):
        raise ValueError('All values must be greater than 0')
    copy_nums = lowerCAmelCase__.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 633 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def UpperCamelCase(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single ``line`` with ``tokenizer``, padding on
    ``padding_side`` to ``max_length`` (when ``pad_to_max_length``) and
    truncating; returns the tokenizer's encoding.

    The original signature declared four parameters all named
    ``lowerCAmelCase__`` (a SyntaxError); parameter names here follow the
    leaked references in the body.
    """
    # BART needs `add_prefix_space` when the line does not already start with one.
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(' ') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding='max_length' if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def UpperCamelCase(input_ids, pad_token_id, attention_mask=None):
    """Drop columns that contain only ``pad_token_id`` across the batch.

    Returns the trimmed ``input_ids``, or a ``(input_ids, attention_mask)``
    pair when a mask is supplied.  The original declared both positional
    parameters as ``lowerCAmelCase__`` (a SyntaxError).
    """
    # Keep a column iff any row holds a non-pad token there.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Line-by-line seq2seq dataset: reads parallel ``<type_path>.source`` /
    ``<type_path>.target`` files and tokenizes a (source, target) pair on
    each access.

    NOTE(review): identifiers in this block look machine-mangled — every
    local is ``lowercase`` (so later reads of ``self.src_file``,
    ``source_line``, ``source_ids``, ... are never actually set here) and
    ``__init__`` declares several parameters all named ``A__``, which is a
    SyntaxError.  Restore the original names before relying on this class.
    """

    def __init__( self ,A__ ,A__ ,A__ ,A__ ,A__="train" ,A__=None ,A__=None ,A__=None ,A__="" ,):
        super().__init__()
        # Paths of the parallel source/target files for this split.
        lowercase = Path(A__).joinpath(type_path + '''.source''')
        lowercase = Path(A__).joinpath(type_path + '''.target''')
        lowercase = self.get_char_lens(self.src_file)
        lowercase = max_source_length
        lowercase = max_target_length
        assert min(self.src_lens) > 0, f'found empty line in {self.src_file}'
        lowercase = tokenizer
        lowercase = prefix
        if n_obs is not None:
            # Optionally restrict the dataset to the first n_obs examples.
            lowercase = self.src_lens[:n_obs]
        lowercase = src_lang
        lowercase = tgt_lang

    def __len__( self):
        # One example per source line.
        return len(self.src_lens)

    def __getitem__( self ,A__):
        lowercase = index + 1  # linecache starts at 1
        lowercase = self.prefix + linecache.getline(str(self.src_file) ,A__).rstrip('''\n''')
        lowercase = linecache.getline(str(self.tgt_file) ,A__).rstrip('''\n''')
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,A__):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers split into a question encoder and a generator.
        lowercase = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,A__) else self.tokenizer
        )
        lowercase = self.tokenizer.generator if isinstance(self.tokenizer ,A__) else self.tokenizer
        lowercase = encode_line(A__ ,A__ ,self.max_source_length ,'''right''')
        lowercase = encode_line(A__ ,A__ ,self.max_target_length ,'''right''')
        lowercase = source_inputs['''input_ids'''].squeeze()
        lowercase = target_inputs['''input_ids'''].squeeze()
        lowercase = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def A__ ( A__):
        # Character length of every line in the file (used as a length proxy).
        return [len(A__) for x in Path(A__).open().readlines()]

    def A__ ( self ,A__):
        """Collate a list of examples into padded, pad-trimmed batch tensors."""
        lowercase = torch.stack([x['''input_ids'''] for x in batch])
        lowercase = torch.stack([x['''attention_mask'''] for x in batch])
        lowercase = torch.stack([x['''decoder_input_ids'''] for x in batch])
        lowercase = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,A__)
            else self.tokenizer.pad_token_id
        )
        lowercase = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,A__)
            else self.tokenizer.pad_token_id
        )
        # Drop all-pad columns to shrink the batch.
        lowercase = trim_batch(A__ ,A__)
        lowercase , lowercase = trim_batch(A__ ,A__ ,attention_mask=A__)
        lowercase = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
# Module-level logger.  NOTE(review): functions below reference `logger`, but
# this assignment binds `lowercase__` — the name looks machine-mangled.
lowercase__ :str = getLogger(__name__)
def UpperCamelCase(lowerCAmelCase__):
    """Flatten one level of nesting: a list of iterables becomes one flat list."""
    flat = []
    for chunk in lowerCAmelCase__:
        flat.extend(chunk)
    return flat
def UpperCamelCase(lowerCAmelCase__):
    """Write the current git repository info to ``<folder>/git_log.json``.

    The original body fetched the info into a throwaway local and then passed
    the (unrelated) folder argument to ``save_json`` twice.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(lowerCAmelCase__, 'git_log.json'))
def UpperCamelCase(content, path, indent=4, **json_dump_kwargs):
    """Serialize ``content`` as JSON to ``path``.

    The original declared three positional parameters all named
    ``lowerCAmelCase__`` (a SyntaxError); extra keyword arguments are passed
    through to ``json.dump``.
    """
    with open(path, 'w') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def UpperCamelCase(lowerCAmelCase__):
    """Read the file at ``lowerCAmelCase__`` and return its parsed JSON content."""
    with open(lowerCAmelCase__) as handle:
        return json.load(handle)
def UpperCamelCase():
    """Return a dict describing the enclosing git repository (id, sha, branch)
    plus the current hostname.

    The original passed an undefined name as ``search_parent_directories`` and
    stringified it as the repo id; both should use the repo object itself.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
        'hostname': str(socket.gethostname()),
    }
    return repo_infos
def UpperCamelCase(f, x):
    """``list(map(f, x))`` — map then materialize.

    The original declared both parameters as ``lowerCAmelCase__`` (a
    SyntaxError).
    """
    return list(map(f, x))
def UpperCamelCase(obj, path):
    """Pickle ``obj`` to ``path``.

    The original declared both parameters as ``lowerCAmelCase__`` (a
    SyntaxError).  Returns ``pickle.dump``'s result (``None``), matching the
    original's ``return``.
    """
    with open(path, 'wb') as f:
        return pickle.dump(obj, f)
def UpperCamelCase(lowerCAmelCase__):
    """SQuAD-style answer normalization: lowercase, strip punctuation, drop
    the articles a/an/the, and collapse whitespace.

    The original collapsed distinct locals (the punctuation set) into one
    name, leaving ``exclude`` unbound, and mangled the inner parameters.
    """
    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__))))
def UpperCamelCase(prediction, ground_truth):
    """Token-level F1 between a predicted and a reference answer, after
    normalization.  Returns 0 when there is no token overlap.

    The original declared both parameters as ``lowerCAmelCase__`` (a
    SyntaxError) and left ``common``/``num_same`` unbound.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection counts shared tokens with multiplicity.
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def UpperCamelCase(prediction, ground_truth):
    """True iff the two answers are identical after normalization.

    The original declared both parameters as ``lowerCAmelCase__`` (a
    SyntaxError).
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def UpperCamelCase(output_lns, reference_lns):
    """Mean exact-match score over paired output/reference lines, as
    ``{"em": score}``.

    The original declared both parameters as ``lowerCAmelCase__`` (a
    SyntaxError), making the paired iteration impossible.
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def UpperCamelCase(model_prefix):
    """True iff ``model_prefix`` denotes a RAG model (name starts with "rag").

    The original body read ``model_prefix`` while the parameter had been
    renamed, leaving the name unbound.
    """
    return model_prefix.startswith('rag')
def UpperCamelCase(extra_params, hparams, config):
    """Move the attributes listed in ``extra_params`` from ``hparams`` onto
    ``config``, mapping ``dropout`` to ``dropout_rate`` for T5-style configs.
    Attributes the config does not know are dropped from ``hparams`` with a
    log message.  Returns the mutated ``(hparams, config)`` pair.

    The original declared all three parameters as ``lowerCAmelCase__`` (a
    SyntaxError) and collapsed the mapping dict into a throwaway local.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 700 |
def UpperCamelCase(input_a, input_b):
    """AND gate: return 1 iff both inputs are non-zero.

    The original declared both parameters as ``lowerCAmelCase__`` (a
    SyntaxError) and the body read undefined names.  The pair contains a 0
    exactly when at least one input is 0.
    """
    return int((input_a, input_b).count(0) == 0)


# Backward-compatible alias: the self-test and demo below were written
# against the name `and_gate`, which the mangling had removed.
and_gate = UpperCamelCase


def test_and_gate():
    """Exhaustively check the two-input truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Map of submodule name -> public symbols, consumed lazily by _LazyModule
# below.  The original assigned this dict (and each optional list) to
# throwaway variables, so `_import_structure` at the bottom raised NameError
# at import time and the optional submodules were never registered.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that performs the
    # imports on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Map of submodule name -> public symbols, consumed lazily by _LazyModule
# below.  The original bound this dict and the modeling list to throwaway
# variables, so `_import_structure` at the bottom raised NameError at import
# time and the modeling submodule was never registered.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that performs the
    # imports on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
from torch import nn
class lowercase(nn.Module):
    """Single-linear-layer classification head mapping an embedding of size
    ``embed_size`` to ``class_size`` logits.

    The original declared both ``__init__`` parameters as ``A__`` (a
    SyntaxError) and bound the layer to a local instead of ``self.mlp``, so
    the forward pass raised AttributeError.
    """

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        self.mlp = nn.Linear(embed_size, class_size)

    def A__(self, hidden_state):
        """Return logits of shape ``(..., class_size)`` for ``hidden_state``
        of shape ``(..., embed_size)``."""
        logits = self.mlp(hidden_state)
        return logits
| 702 |
import logging
from transformers import PretrainedConfig
# Module-level logger and the map of pretrained checkpoint name -> config URL.
# NOTE(review): downstream code conventionally expects these under `logger`
# and an `*_PRETRAINED_CONFIG_ARCHIVE_MAP` name — the identifiers look mangled.
lowercase__ :int = logging.getLogger(__name__)
lowercase__ :Dict = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Configuration for the BertAbs summarization model: encoder/decoder
    layer counts, hidden sizes, attention heads, feed-forward sizes and
    dropout rates.

    The original ``__init__`` declared twelve parameters all named ``A__``
    (a SyntaxError) and bound each value to a throwaway local instead of an
    instance attribute; names and defaults here follow the bertabs config.
    """

    lowercase_ : Optional[int] ='''bertabs'''

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ :int = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Dict = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowercase__ :int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 633 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    # Convert a GPTSAN TensorFlow checkpoint directory (args.tf_model_dir) into
    # a PyTorch state dict written to args.output, renaming/reshaping each
    # Mesh-TensorFlow variable to its PyTorch-module counterpart.
    #
    # NOTE(review): the locals here look machine-mangled — every assignment
    # binds `lowercase`, yet later lines read `parameter_file`, `params`,
    # `reader`, `shapes`, `vnp`, `player`, `state`, `name`, ... which are
    # therefore unbound.  Restore the original variable names before running.
    lowercase = os.path.join(args.tf_model_dir , '''parameters.json''' )
    lowercase = json.loads(open(lowerCAmelCase__ ).read() )
    if not params:
        raise ValueError(
            f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
    if not args.output.endswith('''.pt''' ):
        lowercase = args.output + '''.pt'''
    lowercase = OrderedDict()
    with tf.device('''/CPU:0''' ):
        lowercase = tf.train.load_checkpoint(args.tf_model_dir )
        lowercase = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            lowercase = reader.get_tensor(lowerCAmelCase__ ).astype(np.floataa )
            # Optimizer slots are not model weights — skip them.
            if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
                continue
            if key_name.startswith('''pasts/''' ):
                if key_name.startswith('''pasts/mlp''' ):
                    lowercase = int(key_name[9] )
                elif key_name.startswith('''pasts/out''' ):
                    lowercase = 8
                lowercase = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
                lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                lowercase = torch.tensor(lowerCAmelCase__ )
            elif key_name.startswith('''model/moe''' ):
                # Mixture-of-experts block: router, soft bypass, and per-expert MLPs.
                lowercase = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/switch_gating/kernel''' ):
                    lowercase = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
                    lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    lowercase = torch.tensor(lowerCAmelCase__ )
                elif key_name.endswith('''/softmlp/kernel''' ):
                    lowercase = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
                    lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    lowercase = torch.tensor(lowerCAmelCase__ )
                elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
                    lowercase = key_name[-9:-7]
                    for i in range(16 ):
                        lowercase = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
                        lowercase = (
                            vnp[i].transpose([1, 0] ).copy()
                        ) # In Mesh-Tensorflow, it is one array, so it is divided
                        lowercase = torch.tensor(lowerCAmelCase__ )
            elif key_name.startswith('''model/mlp''' ):
                # Dense feed-forward block: two linear layers with biases.
                lowercase = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/p1/kernel''' ):
                    lowercase = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
                    lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    lowercase = torch.tensor(lowerCAmelCase__ )
                elif key_name.endswith('''/p1/bias''' ):
                    lowercase = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
                    lowercase = vnp.copy() # same because it is one dimensional
                    lowercase = torch.tensor(lowerCAmelCase__ )
                elif key_name.endswith('''/p2/kernel''' ):
                    lowercase = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
                    lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    lowercase = torch.tensor(lowerCAmelCase__ )
                elif key_name.endswith('''/p2/bias''' ):
                    lowercase = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
                    lowercase = vnp.copy() # same because it is one dimensional
                    lowercase = torch.tensor(lowerCAmelCase__ )
            elif key_name.startswith('''model/ln''' ):
                # Feed-forward layer norm (bias + weight).
                lowercase = int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    lowercase = '''model.blocks.%d.feed_forward.norm.bias''' % player
                    lowercase = vnp.copy() # same because it is one dimensional
                    lowercase = torch.tensor(lowerCAmelCase__ )
                elif key_name.endswith('''/g''' ):
                    lowercase = '''model.blocks.%d.feed_forward.norm.weight''' % player
                    lowercase = vnp.copy() # same because it is one dimensional
                    lowercase = torch.tensor(lowerCAmelCase__ )
            elif key_name.startswith('''model/att''' ):
                # Self-attention block: fused qkv kernel split into q/k/v, plus
                # the output projection.
                lowercase = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/qkv/kernel''' ):
                    lowercase = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
                    lowercase = state[:, 0, :, :]
                    lowercase = state[:, 1, :, :]
                    lowercase = state[:, 2, :, :]
                    lowercase = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    lowercase = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    lowercase = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    lowercase = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
                    lowercase = torch.tensor(lowerCAmelCase__ )
                    lowercase = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
                    lowercase = torch.tensor(lowerCAmelCase__ )
                    lowercase = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
                    lowercase = torch.tensor(lowerCAmelCase__ )
                elif key_name.endswith('''/o/kernel''' ):
                    lowercase = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
                    lowercase = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    lowercase = torch.tensor(lowerCAmelCase__ )
            elif key_name.startswith('''model/an''' ):
                # Self-attention layer norm (bias + weight).
                lowercase = int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    lowercase = '''model.blocks.%d.self_attn.norm.bias''' % player
                    lowercase = vnp.copy() # same because it is one dimensional
                    lowercase = torch.tensor(lowerCAmelCase__ )
                elif key_name.endswith('''/g''' ):
                    lowercase = '''model.blocks.%d.self_attn.norm.weight''' % player
                    lowercase = vnp.copy() # same because it is one dimensional
                    lowercase = torch.tensor(lowerCAmelCase__ )
            elif (
                key_name.startswith('''model/wte''' )
                or key_name.startswith('''model/wpe''' )
                or key_name.startswith('''model/ete''' )
            ):
                # Token / position / extra-position embedding tables; the token
                # table is also reused as the tied LM head.
                lowercase = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
                    key_name[-3:]
                ]
                lowercase = '''model.%s.weight''' % nlayer
                lowercase = vnp.copy() # same in embedded
                lowercase = torch.tensor(lowerCAmelCase__ )
                if key_name.startswith('''model/wte''' ):
                    lowercase = '''lm_head.weight'''
                    lowercase = vnp.copy() # same in embedded
                    lowercase = torch.tensor(lowerCAmelCase__ )
            elif key_name.startswith('''model/wob''' ):
                lowercase = '''final_logits_bias'''
                lowercase = vnp.copy() # same in embedded
                lowercase = state.reshape((1, -1) )
                lowercase = torch.tensor(lowerCAmelCase__ )
            elif key_name == "model/dense/kernel":
                lowercase = '''model.last_project.weight'''
                lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                lowercase = torch.tensor(lowerCAmelCase__ )
            elif key_name == "model/dense_1/bias":
                lowercase = '''model.last_project.bias'''
                lowercase = vnp.copy() # same because it is one dimensional
                lowercase = torch.tensor(lowerCAmelCase__ )
    # Persist the converted state dict.
    torch.save(lowerCAmelCase__ , args.output )
if __name__ == "__main__":
    # CLI entry point: --tf_model_dir is the TF checkpoint directory,
    # --output the destination .pt file.
    # NOTE(review): `parser` and `args` are read below, but the assignments
    # bind `lowercase__` — identifiers look machine-mangled; restore them
    # before running.
    lowercase__ :List[Any] = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    lowercase__ :int = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
| 704 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase(shape, vocab_size, rng=None):
    """Return an int32 numpy array of ``shape`` filled with uniform random
    token ids in ``[0, vocab_size)``.

    The original declared its parameters all as ``lowerCAmelCase__`` (a
    SyntaxError) and used the nonexistent dtype ``jnp.intaa`` — the intended
    dtype is 32-bit integers.
    """
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=np.int32).reshape(shape)
    return output
def UpperCamelCase(shape, rng=None):
    """Random 0/1 attention mask of ``shape``; the last column is forced to 1
    so every batch row attends to at least one token.

    The original declared both parameters as ``lowerCAmelCase__`` (a
    SyntaxError) and bound the forced column to a throwaway local instead of
    writing it into the mask.
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowercase :
    """Generation-test mixin for Flax causal-LM model suites: each test builds
    small inputs via the config helper, runs ``model.generate`` (plain and
    jitted), and checks output shapes and jit/non-jit agreement.

    NOTE(review): identifiers look machine-mangled — every method is named
    ``A__`` (so later defs shadow earlier ones in the class namespace) and
    tuple unpacks repeatedly bind ``lowercase``, leaving later reads such as
    ``inputs``, ``config``, ``input_ids`` unbound.  Restore the original test
    names before trusting this suite.
    """

    # Expected by subclasses: the model tester and the generative model classes.
    lowercase_ : Any =None
    lowercase_ : List[str] =()

    def A__ ( self):
        # Build (config, input_ids, attention_mask, max_length) for generation tests.
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        lowercase = 2
        lowercase = inputs['''input_ids'''].shape[-1] // 2
        lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        lowercase = jnp.ones_like(A__)
        lowercase = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        lowercase = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            lowercase = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def A__ ( self):
        # Greedy generation must match between the Flax model and its PyTorch twin.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 0
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            lowercase = getattr(A__ ,A__)
            lowercase = pt_model_class(A__).eval()
            lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
            lowercase = flax_model.generate(A__).sequences
            lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())

    def A__ ( self):
        # Greedy decoding: shape check plus jit/non-jit equality.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Sampling: shape check plus jit/non-jit equality.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam search (num_beams=2): shape check plus jit/non-jit equality.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Multiple return sequences: batch dim must scale by num_return_sequences.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = False
        lowercase = max_length
        lowercase = 2
        lowercase = 2
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)

    def A__ ( self):
        # Sampling with temperature / top_k / top_p warpers and length controls.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = True
        lowercase = max_length
        lowercase = 0.8
        lowercase = 1_0
        lowercase = 0.3
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Greedy decoding with min/max length constraints.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam search with min/max length constraints.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        lowercase = max_length
        lowercase = 2
        lowercase = 1
        lowercase = 8
        lowercase = 9
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Greedy decoding with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = False
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Sampling with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = True
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())

    def A__ ( self):
        # Beam search with a left-padded attention mask.
        lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase = attention_mask.at[(0, 0)].set(0)
        lowercase = 2
        lowercase = max_length
        for model_class in self.all_generative_model_classes:
            lowercase = model_class(A__)
            lowercase = model.generate(A__ ,attention_mask=A__).sequences
            self.assertEqual(generation_outputs.shape[-1] ,A__)
            lowercase = jit(model.generate)
            lowercase = jit_generate(A__ ,attention_mask=A__).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
    """Integration check: ``generate`` must reject typo'd or unknown keyword
    arguments with a helpful error.

    NOTE(review): identifiers look machine-mangled — locals all bind
    ``lowercase`` and the exception type passed to ``assertRaisesRegex`` is
    the undefined ``A__``; restore the original names before running.
    """

    def A__ ( self):
        lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        lowercase = '''Hello world'''
        lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(A__ ,'''do_samples'''):
            model.generate(A__ ,do_samples=A__)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(A__ ,'''foo'''):
            lowercase = {'''foo''': '''bar'''}
            model.generate(A__ ,**A__)
| 633 | 0 |
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return the keyword with duplicate letters removed (first occurrence kept).

    Spaces are always kept — `or` binds after `and`, so the duplicate/alpha
    check does not apply to them; every other character is kept only if it is
    alphabetic and has not been seen before.

    Args:
        lowerCAmelCase__: the keyword string to deduplicate.

    Returns:
        The deduplicated string.
    """
    # Fixed: the accumulator was never initialized under the name the loop
    # used, and the loop iterated an undefined `key` instead of the parameter.
    key_no_dups = ""
    for ch in lowerCAmelCase__:
        # precedence note: `or` has lower priority than `and`, so a space is
        # always appended, even when repeated.
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def UpperCamelCase ( lowerCAmelCase__ ):
    """Build a keyword-cipher substitution map for keyword ``lowerCAmelCase__``.

    The first letters of the alphabet map onto the deduplicated keyword; the
    remaining plaintext letters map onto the unused alphabet letters in order.

    Returns:
        dict mapping plaintext letter -> ciphertext letter (uppercase).
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key.
    # NOTE(review): helper name per the original module; in this file the
    # dedup helper was renamed by a mangling pass.
    key = remove_duplicates(lowerCAmelCase__.upper())
    offset = len(key)
    # First fill cipher with key characters.
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning.
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped.
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def UpperCamelCase ( message , cipher_map ):
    """Encipher ``message`` (upper-cased) through ``cipher_map``.

    Characters without a mapping (spaces, digits, punctuation) pass through
    unchanged. Fixed: the original signature repeated one parameter name
    (a SyntaxError) and looked up the wrong value in the map.
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def UpperCamelCase ( message , cipher_map ):
    """Decipher ``message`` by inverting ``cipher_map``.

    Unmapped characters pass through unchanged. Fixed: the original signature
    repeated one parameter name (a SyntaxError) and the reverse map / lookups
    referenced undefined identifiers.
    """
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def UpperCamelCase ( ):
    """Interactive driver: read a message/keyword, then encipher or decipher.

    Raises:
        KeyError: if the chosen option is neither 'e' nor 'd'.
    """
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        # NOTE(review): `encipher`/`decipher`/`create_cipher_map` are the
        # helper names from the original module; they were renamed here.
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
# Script entry point: run the module's doctests, then the interactive loop.
# NOTE(review): `main` is not defined under that name in this mangled file —
# the driver above was renamed; confirm against the original module.
if __name__ == "__main__":
 import doctest
 doctest.testmod()
 main()
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
 """UniPCMultistepScheduler specialization of the shared scheduler test-suite.

 NOTE(review): identifiers are mechanically mangled — locals are all rebound
 to ``lowercase`` and argument placeholders appear as ``A__``; several
 signatures (e.g. ``def A__ ( self ,A__=0 ,**A__)``) repeat a parameter name,
 which is a SyntaxError. Code below is kept byte-identical; confirm against
 the original diffusers test module before relying on it.
 """
 lowercase_ : List[str] =(UniPCMultistepScheduler,)
 lowercase_ : Tuple =(('''num_inference_steps''', 25),)
 # Build the default scheduler config, with keyword overrides.
 def A__ ( self ,**A__):
  lowercase = {
   '''num_train_timesteps''': 1_0_0_0,
   '''beta_start''': 0.0001,
   '''beta_end''': 0.02,
   '''beta_schedule''': '''linear''',
   '''solver_order''': 2,
   '''solver_type''': '''bh2''',
  }
  config.update(**A__)
  return config
 # Round-trip through save_config/from_pretrained, then step both instances
 # in lockstep and require identical outputs.
 def A__ ( self ,A__=0 ,**A__):
  lowercase = dict(self.forward_default_kwargs)
  lowercase = kwargs.pop('''num_inference_steps''' ,A__)
  lowercase = self.dummy_sample
  lowercase = 0.1 * sample
  lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
  for scheduler_class in self.scheduler_classes:
   lowercase = self.get_scheduler_config(**A__)
   lowercase = scheduler_class(**A__)
   scheduler.set_timesteps(A__)
   # copy over dummy past residuals
   lowercase = dummy_past_residuals[: scheduler.config.solver_order]
   with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(A__)
    lowercase = scheduler_class.from_pretrained(A__)
    new_scheduler.set_timesteps(A__)
    # copy over dummy past residuals
    lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
   lowercase , lowercase = sample, sample
   for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
    lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
    lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
    assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
 # Same round-trip check, but residuals are copied only after set_timesteps.
 def A__ ( self ,A__=0 ,**A__):
  lowercase = dict(self.forward_default_kwargs)
  lowercase = kwargs.pop('''num_inference_steps''' ,A__)
  lowercase = self.dummy_sample
  lowercase = 0.1 * sample
  lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
  for scheduler_class in self.scheduler_classes:
   lowercase = self.get_scheduler_config()
   lowercase = scheduler_class(**A__)
   scheduler.set_timesteps(A__)
   # copy over dummy past residuals (must be after setting timesteps)
   lowercase = dummy_past_residuals[: scheduler.config.solver_order]
   with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(A__)
    lowercase = scheduler_class.from_pretrained(A__)
    # copy over dummy past residuals
    new_scheduler.set_timesteps(A__)
    # copy over dummy past residual (must be after setting timesteps)
    lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
   lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
   lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
   assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
 # Run a full denoising loop (10 steps) and return the final sample.
 def A__ ( self ,A__=None ,**A__):
  if scheduler is None:
   lowercase = self.scheduler_classes[0]
   lowercase = self.get_scheduler_config(**A__)
   lowercase = scheduler_class(**A__)
  lowercase = self.scheduler_classes[0]
  lowercase = self.get_scheduler_config(**A__)
  lowercase = scheduler_class(**A__)
  lowercase = 1_0
  lowercase = self.dummy_model()
  lowercase = self.dummy_sample_deter
  scheduler.set_timesteps(A__)
  for i, t in enumerate(scheduler.timesteps):
   lowercase = model(A__ ,A__)
   lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
  return sample
 # Shape check: two consecutive steps must produce sample-shaped outputs.
 def A__ ( self):
  lowercase = dict(self.forward_default_kwargs)
  lowercase = kwargs.pop('''num_inference_steps''' ,A__)
  for scheduler_class in self.scheduler_classes:
   lowercase = self.get_scheduler_config()
   lowercase = scheduler_class(**A__)
   lowercase = self.dummy_sample
   lowercase = 0.1 * sample
   if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
    scheduler.set_timesteps(A__)
   elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
    lowercase = num_inference_steps
   # copy over dummy past residuals (must be done after set_timesteps)
   lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
   lowercase = dummy_past_residuals[: scheduler.config.solver_order]
   lowercase = scheduler.timesteps[5]
   lowercase = scheduler.timesteps[6]
   lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
   lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
   self.assertEqual(output_a.shape ,sample.shape)
   self.assertEqual(output_a.shape ,output_a.shape)
 def A__ ( self):
  # make sure that iterating over schedulers with same config names gives same results
  # for defaults
  lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
  lowercase = self.full_loop(scheduler=A__)
  lowercase = torch.mean(torch.abs(A__))
  assert abs(result_mean.item() - 0.2464) < 1E-3
  lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
  lowercase = DEISMultistepScheduler.from_config(scheduler.config)
  lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
  lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
  lowercase = self.full_loop(scheduler=A__)
  lowercase = torch.mean(torch.abs(A__))
  assert abs(result_mean.item() - 0.2464) < 1E-3
 def A__ ( self):
  for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
   self.check_over_configs(num_train_timesteps=A__)
 # Sweep thresholding / solver / prediction-type combinations.
 def A__ ( self):
  self.check_over_configs(thresholding=A__)
  for order in [1, 2, 3]:
   for solver_type in ["bh1", "bh2"]:
    for threshold in [0.5, 1.0, 2.0]:
     for prediction_type in ["epsilon", "sample"]:
      self.check_over_configs(
       thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)
 def A__ ( self):
  for prediction_type in ["epsilon", "v_prediction"]:
   self.check_over_configs(prediction_type=A__)
 # Full loops across solver settings must never produce NaNs.
 def A__ ( self):
  for solver_type in ["bh1", "bh2"]:
   for order in [1, 2, 3]:
    for prediction_type in ["epsilon", "sample"]:
     self.check_over_configs(
      solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
     lowercase = self.full_loop(
      solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
     assert not torch.isnan(A__).any(), "Samples have nan numbers"
 def A__ ( self):
  self.check_over_configs(lower_order_final=A__)
  self.check_over_configs(lower_order_final=A__)
 def A__ ( self):
  for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
   self.check_over_forward(num_inference_steps=A__ ,time_step=0)
 # Regression value for the default full loop.
 def A__ ( self):
  lowercase = self.full_loop()
  lowercase = torch.mean(torch.abs(A__))
  assert abs(result_mean.item() - 0.2464) < 1E-3
 # Regression value for the v-prediction full loop.
 def A__ ( self):
  lowercase = self.full_loop(prediction_type='''v_prediction''')
  lowercase = torch.mean(torch.abs(A__))
  assert abs(result_mean.item() - 0.1014) < 1E-3
 # Half-precision loop must keep the sample dtype.
 def A__ ( self):
  lowercase = self.scheduler_classes[0]
  lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
  lowercase = scheduler_class(**A__)
  lowercase = 1_0
  lowercase = self.dummy_model()
  lowercase = self.dummy_sample_deter.half()
  scheduler.set_timesteps(A__)
  for i, t in enumerate(scheduler.timesteps):
   lowercase = model(A__ ,A__)
   lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
  assert sample.dtype == torch.floataa
 # Using num_train_timesteps as the inference count must keep timesteps unique.
 def A__ ( self ,**A__):
  for scheduler_class in self.scheduler_classes:
   lowercase = self.get_scheduler_config(**A__)
   lowercase = scheduler_class(**A__)
   scheduler.set_timesteps(scheduler.config.num_train_timesteps)
   assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
# Lazy-import __init__ for the LayoutLMv3 model family.
#
# Fixed defects: the import-structure dict was bound to a placeholder name
# (and then repeatedly *overwritten* by the optional-dependency branches
# instead of extended), while `_LazyModule` below referenced the undefined
# `_import_structure`; the TYPE_CHECKING imports also used module/class names
# ("layoutlmva"/"LayoutLMva*") inconsistent with the names declared in the
# structure strings.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Maps submodule name -> the public names it provides (consumed by _LazyModule).
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]

if TYPE_CHECKING:
    # Static type checkers see the real imports; names match the structure above.
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
import argparse
from collections import defaultdict
import yaml
# Path to the documentation table of contents validated below.
# NOTE(review): bound to the placeholder `lowercase__` by a renaming pass;
# the original constant name was presumably `PATH_TO_TOC`.
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
    """Deduplicate and sort one model-doc table-of-contents section.

    Args:
        lowerCAmelCase__: list of ``{"local": ..., "title": ...}`` entries.

    Returns:
        The cleaned list, sorted case-insensitively by title.

    Raises:
        ValueError: if one ``local`` key appears with conflicting titles.
    """
    # Fixed: every working local (`counts`, `duplicates`, `new_doc`, `titles`)
    # had been collapsed onto a single placeholder, and the sort lambda
    # declared one parameter but read another.
    counts = defaultdict(int)
    for doc in lowerCAmelCase__:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in lowerCAmelCase__ if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in lowerCAmelCase__ if counts[doc["local"]] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def UpperCamelCase ( lowerCAmelCase__=False ):
    """Validate (and optionally rewrite) the model sections of the doc TOC.

    Args:
        lowerCAmelCase__: when True, rewrite the TOC file in place instead of
            raising on inconsistencies.

    Raises:
        ValueError: if the TOC is out of order and rewriting is disabled.
    """
    # Fixed: the original body opened the boolean flag as a file and had all
    # locals collapsed onto one placeholder name.
    overwrite = lowerCAmelCase__
    # NOTE(review): `lowercase__` is the module-level TOC path constant above
    # (originally `PATH_TO_TOC`).
    with open(lowercase__, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        # NOTE(review): cleaner helper per the original script (renamed here).
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(lowercase__, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    # Fixed: the parser/args results were bound to a placeholder, so the
    # `parser.add_argument` / `args.fix_and_overwrite` references below failed.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    # NOTE(review): checker function per the original script (renamed here).
    check_model_doc(args.fix_and_overwrite)
| 633 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and the BioGPT pretrained-config archive map.
# NOTE(review): both were bound to the placeholder `lowercase__` by a renaming
# pass (the second binding overwrites the first); the originals were
# presumably `logger` and `BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP`.
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
lowercase__ :int = {
 "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
 # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Configuration for BioGPT models.

    Stores the hyper-parameters below as instance attributes and forwards the
    special-token ids (plus any extra kwargs) to the base config class.

    Fixed defects: the original ``__init__`` declared every parameter as
    ``A__`` (duplicate argument names — a SyntaxError) and rebound a single
    local instead of setting ``self`` attributes, discarding all arguments.
    """

    # Model-type tag, kept under the file's existing class-attribute name.
    lowercase_ : int ='''biogpt'''

    def __init__(
        self,
        vocab_size=4_2_3_8_4,
        hidden_size=1_0_2_4,
        num_hidden_layers=2_4,
        num_attention_heads=1_6,
        intermediate_size=4_0_9_6,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_0_2_4,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Persist every hyper-parameter on the instance.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 707 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase ( func , a , precision = 10**-10 ):
    """Find a root of ``func`` (an expression string in ``x``) via Newton-Raphson.

    Fixed: the original signature declared the same parameter name three times
    (a SyntaxError) and the iterate was bound to a placeholder local.

    Args:
        func: expression string in the variable ``x``, e.g. ``"sin(x)"``.
        a: starting guess.
        precision: stop once ``|func(x)| < precision``.

    Returns:
        The root as a float.

    SECURITY NOTE: ``eval`` executes arbitrary code — only call with trusted
    expression strings.
    """
    x = a
    while True:
        # x_{n+1} = x_n - f(x_n) / f'(x_n); sympy's `diff` supplies f'.
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
# Demo runs of the Newton-Raphson solver above.
# NOTE(review): `newton_raphson` is not defined under that name in this
# mangled file — the solver above was renamed; confirm the original module.
if __name__ == "__main__":
 # Find root of trigonometric function
 # Find value of pi
 print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
 # Find root of polynomial
 print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
 # Find Square Root of 5
 print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
 # Exponential Roots
 print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 633 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return the inverse of a 2x2 or 3x3 matrix (list of rows).

    Uses ``Decimal`` arithmetic internally to limit floating-point error.
    Fixed: the ``d`` alias, the swapped/cofactor matrices, and every element
    assignment had been collapsed onto placeholder locals (NameErrors).

    Raises:
        ValueError: if the matrix is singular or not 2x2 / 3x3.
    """
    matrix = lowerCAmelCase__
    d = Decimal  # exact-arithmetic shorthand

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1]))
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])))
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0]))
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])))
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0]))
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])))
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1]))
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])))
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0]))

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 708 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase ( lowerCAmelCase__ ):
    """Convert an RGB image array of shape (h, w, 3) to grayscale.

    Uses the ITU-R 601 luma weights. Fixed: the channel locals were bound to a
    placeholder, so `r`/`g`/`b` in the return were undefined.
    """
    rgb = lowerCAmelCase__
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def UpperCamelCase ( lowerCAmelCase__ ):
    """Threshold a grayscale image: True where 127 < value <= 255.

    Fixed: the body referenced an undefined `gray` instead of the parameter.
    """
    gray = lowerCAmelCase__
    return (gray > 127) & (gray <= 255)
def UpperCamelCase ( image , kernel ):
    """Binary morphological dilation of `image` by `kernel`.

    Fixed: the original signature repeated one parameter name (a SyntaxError)
    and the padded-image copy / per-pixel output writes were collapsed onto
    placeholder locals.

    Args:
        image: 2-D 0/1 array.
        kernel: 2-D structuring element.

    Returns:
        2-D array of the same shape as `image`, 1 where the kernel overlaps
        any set pixel.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
    # Copy image to padded image (centered for the kernel sweep below).
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
# Script entry point: read an image, binarize it, dilate with a cross-shaped
# structuring element, and save the result.
# NOTE(review): every intermediate was rebound to the placeholder
# `lowercase__`, so `lena_path`, `lena`, `structuring_element`, `output` and
# `pil_img` below are unbound, and `dilation`/`gray_to_binary`/`rgb_to_gray`
# were renamed by the mangling pass — confirm against the original script.
if __name__ == "__main__":
 # read original image
 lowercase__ :str = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
 lowercase__ :List[str] = np.array(Image.open(lena_path))
 # kernel to be applied
 lowercase__ :Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
 lowercase__ :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
 # Save the output image
 lowercase__ :str = Image.fromarray(output).convert("RGB")
 pil_img.save("result_dilation.png")
| 633 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase ( shape , vocab_size , rng=None ):
    """Create a random int32 array of `shape` with values in [0, vocab_size).

    Fixed: the original signature declared the same parameter name three times
    (a SyntaxError), every local was collapsed onto a placeholder, and the
    dtype was the mangled `jnp.intaa` (int32).

    Args:
        shape: tuple of dimensions.
        vocab_size: exclusive upper bound for the sampled ids.
        rng: optional `random.Random`; a fresh one is created when omitted.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def UpperCamelCase ( shape , rng=None ):
    """Create a random 0/1 attention mask of `shape`.

    Fixed: the original signature repeated one parameter name (a SyntaxError)
    and the final-column write was collapsed onto a placeholder local.
    """
    # NOTE(review): `ids_tensor` is the helper defined above (renamed here by
    # the mangling pass).
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowercase :
 """Generation test mixin for Flax causal-LM models: checks greedy/sample/beam
 generation, PT-vs-Flax parity, eager-vs-jit parity, and attention-mask
 handling.

 NOTE(review): identifiers are mechanically mangled — locals are all rebound
 to ``lowercase`` and bare ``A__`` references are unbound placeholders
 (presumably ``config``, ``model``, ``input_ids`` ...). Code below is kept
 byte-identical; confirm against the original transformers test module.
 """
 lowercase_ : Any =None
 lowercase_ : List[str] =()
 # Prepare (config, input_ids, attention_mask, max_length) for generation.
 def A__ ( self):
  lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
  # cut to half length & take max batch_size 3
  lowercase = 2
  lowercase = inputs['''input_ids'''].shape[-1] // 2
  lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
  lowercase = jnp.ones_like(A__)
  lowercase = attention_mask[:max_batch_size, :sequence_length]
  # generate max 5 tokens
  lowercase = input_ids.shape[-1] + 5
  if config.eos_token_id is not None and config.pad_token_id is None:
   # hack to allow generate for models such as GPT2 as is done in `generate()`
   lowercase = config.eos_token_id
  return config, input_ids, attention_mask, max_length
 # Greedy generation must match between the Flax model and its PT twin.
 @is_pt_flax_cross_test
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  lowercase = False
  lowercase = max_length
  lowercase = 0
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
   lowercase = getattr(A__ ,A__)
   lowercase = pt_model_class(A__).eval()
   lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
   lowercase = flax_model.generate(A__).sequences
   lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
   if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
    lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
   self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())
 # Greedy generation: output length and eager-vs-jit equality.
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  lowercase = False
  lowercase = max_length
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model.generate(A__).sequences
   self.assertEqual(generation_outputs.shape[-1] ,A__)
   lowercase = jit(model.generate)
   lowercase = jit_generate(A__).sequences
   self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
 # Sampling generation: output length and eager-vs-jit equality.
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  lowercase = True
  lowercase = max_length
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model.generate(A__).sequences
   self.assertEqual(generation_outputs.shape[-1] ,A__)
   lowercase = jit(model.generate)
   lowercase = jit_generate(A__).sequences
   self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
 # Beam search generation: output length and eager-vs-jit equality.
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  lowercase = False
  lowercase = max_length
  lowercase = 2
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model.generate(A__).sequences
   self.assertEqual(generation_outputs.shape[-1] ,A__)
   lowercase = jit(model.generate)
   lowercase = jit_generate(A__).sequences
   self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
 # Multiple return sequences: batch dimension must scale accordingly.
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  lowercase = False
  lowercase = max_length
  lowercase = 2
  lowercase = 2
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model.generate(A__).sequences
   self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)
 # Sampling with temperature / top-k / top-p and forced bos/eos tokens.
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  lowercase = True
  lowercase = max_length
  lowercase = 0.8
  lowercase = 1_0
  lowercase = 0.3
  lowercase = 1
  lowercase = 8
  lowercase = 9
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model.generate(A__).sequences
   self.assertEqual(generation_outputs.shape[-1] ,A__)
   lowercase = jit(model.generate)
   lowercase = jit_generate(A__).sequences
   self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
 # Greedy generation with forced bos/eos tokens.
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  lowercase = max_length
  lowercase = 1
  lowercase = 8
  lowercase = 9
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model.generate(A__).sequences
   self.assertEqual(generation_outputs.shape[-1] ,A__)
   lowercase = jit(model.generate)
   lowercase = jit_generate(A__).sequences
   self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
 # Beam search with forced bos/eos tokens.
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  lowercase = max_length
  lowercase = 2
  lowercase = 1
  lowercase = 8
  lowercase = 9
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model.generate(A__).sequences
   self.assertEqual(generation_outputs.shape[-1] ,A__)
   lowercase = jit(model.generate)
   lowercase = jit_generate(A__).sequences
   self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
 # Greedy generation with a left-padded attention mask.
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  # pad attention mask on the left
  lowercase = attention_mask.at[(0, 0)].set(0)
  lowercase = False
  lowercase = max_length
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model.generate(A__ ,attention_mask=A__).sequences
   self.assertEqual(generation_outputs.shape[-1] ,A__)
   lowercase = jit(model.generate)
   lowercase = jit_generate(A__ ,attention_mask=A__).sequences
   self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
 # Sampling generation with a left-padded attention mask.
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  # pad attention mask on the left
  lowercase = attention_mask.at[(0, 0)].set(0)
  lowercase = True
  lowercase = max_length
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model.generate(A__ ,attention_mask=A__).sequences
   self.assertEqual(generation_outputs.shape[-1] ,A__)
   lowercase = jit(model.generate)
   lowercase = jit_generate(A__ ,attention_mask=A__).sequences
   self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
 # Beam search with a left-padded attention mask.
 def A__ ( self):
  lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
  # pad attention mask on the left
  lowercase = attention_mask.at[(0, 0)].set(0)
  lowercase = 2
  lowercase = max_length
  for model_class in self.all_generative_model_classes:
   lowercase = model_class(A__)
   lowercase = model.generate(A__ ,attention_mask=A__).sequences
   self.assertEqual(generation_outputs.shape[-1] ,A__)
   lowercase = jit(model.generate)
   lowercase = jit_generate(A__ ,attention_mask=A__).sequences
   self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
 """Integration test: Flax `generate()` must reject unknown keyword arguments.

 NOTE(review): the bare `A__` references below are unbound placeholders
 (presumably `ValueError`, `input_ids`, `True`); code kept byte-identical —
 confirm against the original test file.
 """
 def A__ ( self):
  lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
  lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
  lowercase = '''Hello world'''
  lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
  # typos are quickly detected (the correct argument is `do_sample`)
  with self.assertRaisesRegex(A__ ,'''do_samples'''):
   model.generate(A__ ,do_samples=A__)
  # arbitrary arguments that will not be used anywhere are also not accepted
  with self.assertRaisesRegex(A__ ,'''foo'''):
   lowercase = {'''foo''': '''bar'''}
   model.generate(A__ ,**A__)
| 709 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase ( lowerCAmelCase__ ):
    """Delete fairseq bookkeeping keys from a state dict, in place.

    Missing keys are ignored. Fixed: the loop popped the whole state dict as
    both key and default, and the body referenced an undefined `state_dict`.
    """
    state_dict = lowerCAmelCase__
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def UpperCamelCase ( lowerCAmelCase__ ):
    """Build a bias-free Linear layer whose weight shares the embedding's data.

    Fixed: every local (the unpacked shape, the layer, the weight write) was
    collapsed onto a placeholder, leaving the function a no-op NameError.
    """
    emb = lowerCAmelCase__
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share storage with the embedding (weight is replaced wholesale).
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def UpperCamelCase ( lowerCAmelCase__ ):
    """Load a fairseq M2M-100 checkpoint from disk and convert it to a
    MaMaaaForConditionalGeneration model.

    Args:
        lowerCAmelCase__: path to the fairseq `model.pt` checkpoint.

    Returns:
        The converted model (weights loaded, lm_head tied to the shared
        embedding).
    """
    # Fixed: every intermediate was collapsed onto a placeholder local.
    mam_aaa = torch.load(lowerCAmelCase__, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    # NOTE(review): helper per the original script (renamed in this file).
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", )

    # Tie the shared embedding to the decoder embedding before loading.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: the projection/lm_head weights are rebuilt below.
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    # Fixed: parser/args were bound to placeholders (so `parser.add_argument`
    # and `args...` below failed), and the attribute access contained a stray
    # 'ß' typo (`args.fairseq_pathß`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # NOTE(review): converter per the original script (renamed in this file).
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 0 |
def sylvester(number: int) -> int:
    """Return the `number`-th term (1-indexed) of Sylvester's sequence.

    a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1, i.e. 2, 3, 7, 43, 1807, ...

    Raises:
        ValueError: if ``number`` < 1.
    """
    assert isinstance(number, int), f'The input value of [n={number}] is not an integer'
    if number == 1:
        return 2
    elif number < 1:
        msg = f'The input value of [n={number}] has to be > 0'
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        # a(n) = a(n-1) * (a(n-1) - 1) + 1
        return lower * upper + 1
if __name__ == "__main__":
    # Demo entry point: print the 8th Sylvester number.
    print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
# ---
from __future__ import annotations
from random import random
class Node:
    """Treap node: a BST by `value` and a max-heap-like structure by random `prior`."""

    def __init__(self, value: int | None = None):
        self.value = value
        # Random heap priority; rebalancing keeps smaller priorities nearer the root.
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        # Pre-order dump: "value " followed by the left then right subtrees.
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root, value):
    """Split treap `root` into two treaps: (values <= value-ish left part, the rest).

    Returns:
        (left, right) roots; either may be ``None``.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Root (and its right subtree) belong to the right result.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # Root (and its left subtree) belong to the left result.
            root.right, right = split(root.right, value)
            return root, right
def merge(left, right):
    """Merge two treaps, assuming every value in `left` is < every value in `right`.

    Returns:
        Root of the merged treap.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        # `left` wins the heap comparison and becomes the root.
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root, value):
    """Insert `value` into the treap rooted at `root`; return the new root."""
    node = Node(value)
    # Split around `value`, then re-merge with the new node in the middle.
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root, value):
    """Remove all nodes holding `value` from the treap; return the new root."""
    # Carve out the slice of nodes equal to `value` and drop it.
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root):
    """Print the treap values in sorted (in-order) sequence, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root, args):
    """Apply a space-separated command string to the treap.

    Commands: ``+N`` inserts N, ``-N`` erases all nodes with value N.

    Returns:
        The (possibly new) treap root.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main():
    """Interactive REPL: read treap commands from stdin until 'q' is entered."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
    # Run doctests first, then start the interactive treap session.
    import doctest
    doctest.testmod()
    main()
# ---
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via maximal matching (2-approximation).

    Args:
        graph: adjacency dict ``{node: [neighbors]}``.

    Returns:
        A set of vertices covering every edge of the graph.
    """
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of directed edge tuples ``(u, v)`` in the adjacency dict."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Example usage (uncomment to run on the sample graph):
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
# ---
def solution(n: int = 1000) -> int:
    """Project Euler 9 (generalized): for a + b + c = n with a**2 + b**2 = c**2,
    return the maximum product a*b*c, or -1 if no such Pythagorean triple exists.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
    # Prints the maximal Pythagorean-triple product for perimeter 1000.
    print(F'{solution() = }')
# ---
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Official dance-diffusion checkpoints: name -> download URL plus the sample
# rate / sample size the checkpoint was trained with.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Convert an (alpha, sigma) noise pair into a timestep in [0, 1].

    BUG FIX: the original declared two parameters with the same name (a
    SyntaxError) and called the non-existent ``torch.atana``; the intended
    call is ``torch.atan2``.
    """
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """Map a linear schedule ``t`` in [0, 1] onto the "crash" noise schedule."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object:
    """Bare attribute container mimicking the original training config object."""

    pass
class DiffusionUncond(nn.Module):
    """Wrapper matching the original checkpoint's module layout so that its
    state_dict keys (``diffusion.*`` / ``diffusion_ema.*``) line up on load."""

    def __init__(self, global_args):
        super().__init__()
        # 4 attention layers, as in the released dance-diffusion checkpoints.
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    """Fetch the named official checkpoint into the current directory.

    Returns:
        Local path of the downloaded ``.ckpt`` file.
    """
    url = MODELS_MAP[model_name]['''url''']
    os.system(f'wget {url} ./' )
    return f'./{model_name}.ckpt'
# Original audio-diffusion layer index -> diffusers sub-module name, keyed by
# which part of the UNet (down / up / mid / depth-0) the layer lives in.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
# Parameter-name translations inside a ResConvBlock.
RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
# Attention parameter translations; fused qkv fans out into three names.
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    """Translate a ResConvBlock parameter name to its diffusers equivalent.

    Raises:
        ValueError: if ``name`` is not ``skip*`` or ``main.{digit}*``.
    """
    if name.startswith('''skip''' ):
        return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
    # name has to be of format main.{digit}
    if not name.startswith('''main.''' ):
        raise ValueError(f'ResConvBlock error with {name}' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name):
    """Translate an attention parameter name.

    Returns a single string, or a list of strings when a fused qkv parameter
    fans out into separate query/key/value entries.

    Raises:
        ValueError: if no ATTN_MAP prefix matches.
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f'Attn error with {name}' )
def rename(input_string, max_depth=13):
    """Translate one original checkpoint key into its diffusers key.

    Returns:
        A single string, or a list of strings when an attention qkv parameter
        fans out into separate query/key/value entries.

    Raises:
        ValueError: if the key does not follow the expected ``net./main.`` layout.
    """
    string = input_string
    if string.split('''.''' )[0] == "timestep_embed":
        return string.replace('''timestep_embed''' , '''time_proj''' )
    # Each "net.3." / "main.7." prefix marks one level of UNet nesting.
    depth = 0
    if string.startswith('''net.3.''' ):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.''' ):
        string = string[4:]
    while string.startswith('''main.7.''' ):
        depth += 1
        string = string[7:]
    if string.startswith('''main.''' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num) > 3 else '''down_blocks.0'''
    if not string_left.startswith('''.''' ):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    """Rename every original checkpoint key to its diffusers name.

    Attention qkv parameters are split into separate tensors on the fly.
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel''' ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    """Store an attention conv parameter `v` under the diffusers key(s) `new_k`.

    A single key means a plain projection (conv weights lose their trailing
    kernel dim); three keys mean a fused qkv tensor split into equal thirds.
    """
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    """Convert an original dance-diffusion checkpoint to a diffusers pipeline,
    then sanity-check that both produce (nearly) identical audio."""
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    model_name = args.model_path.split('''/''' )[-1].split('''.''' )[0]
    if not os.path.isfile(args.model_path):
        # Only official model names can be auto-downloaded.
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)['''state_dict'''] )
    # Use the EMA weights, which are what the released checkpoints sample with.
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith('''kernel''' ) for k in list(diffusers_minus_renamed)), f'Problem with {diffusers_minus_renamed}'
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    # Re-seed so both samplers start from the same noise.
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print('''Diff sum''' , diff_sum)
    print('''Diff max''' , diff_max)
    assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
    print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    main(args)
# ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> public names it exports.
# BUG FIX: this dict was assigned to a throwaway name although `_import_structure`
# is what _LazyModule below consumes.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy loader so heavy deps import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an (H, W, 3) RGB image to grayscale using ITU-R BT.601 weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask (True where 127 < value <= 255)."""
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element.

    A pixel becomes 1 when any kernel-weighted neighbour is set.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
# ---
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Evaluate the normal probability density N(mu, sigma**2) at `x`.

    Works element-wise on numpy arrays as well as on scalars.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest
    doctest.testmod()
# ---
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm

# Module logger; the benchmark-arguments class below logs deprecation warnings
# through it, so it must be named `logger`.
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    """Benchmark arguments specific to the PyTorch backend.

    Accepts the deprecated ``no_*`` boolean flags for backward compatibility and
    translates them into their positive counterparts.
    """

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Translate each deprecated `no_x` kwarg into `x = not value`, warning once.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    # Trace models with torchscript before benchmarking.
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    # Dump XLA/TPU metrics after the run.
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Resolve (device, n_gpu) once; cached for the other properties below."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
# ---
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read boolean flag `key` from the environment, falling back to `default`.

    Raises:
        ValueError: if the variable is set but not a recognised yes/no value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.' )
    return _value
# Module-level switch: slow tests run only when RUN_SLOW=yes in the environment.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator: unconditionally skip a test."""
    return unittest.skip('''Test was skipped''' )(test_case)


def slow(test_case):
    """Decorator: run the test only when RUN_SLOW=yes is set in the environment."""
    return unittest.skipUnless(_run_slow_tests, '''test is slow''' )(test_case)


def require_cpu(test_case):
    """Decorator: run only when no CUDA device is available."""
    return unittest.skipUnless(not torch.cuda.is_available(), '''test requires only a CPU''' )(test_case)


def require_cuda(test_case):
    """Decorator: run only when a CUDA device is available."""
    return unittest.skipUnless(torch.cuda.is_available(), '''test requires a GPU''' )(test_case)


def require_xpu(test_case):
    """Decorator: run only when an XPU is available."""
    return unittest.skipUnless(is_xpu_available(), '''test requires a XPU''' )(test_case)


def require_mps(test_case):
    """Decorator: run only when torch has `mps` backend support."""
    return unittest.skipUnless(is_mps_available(), '''test requires a `mps` backend support in `torch`''' )(test_case)


def require_huggingface_suite(test_case):
    """Decorator: run only when both transformers and datasets are installed."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), '''test requires the Hugging Face suite''' )(test_case)


def require_bnb(test_case):
    """Decorator: run only when bitsandbytes is installed."""
    return unittest.skipUnless(is_bnb_available(), '''test requires the bitsandbytes library''' )(test_case)


def require_tpu(test_case):
    """Decorator: run only when a TPU is available."""
    return unittest.skipUnless(is_tpu_available(), '''test requires TPU''' )(test_case)


def require_single_gpu(test_case):
    """Decorator: run only when exactly one CUDA device is available."""
    return unittest.skipUnless(torch.cuda.device_count() == 1, '''test requires a GPU''' )(test_case)


def require_single_xpu(test_case):
    """Decorator: run only when exactly one XPU is available."""
    return unittest.skipUnless(torch.xpu.device_count() == 1, '''test requires a XPU''' )(test_case)


def require_multi_gpu(test_case):
    """Decorator: run only when more than one CUDA device is available."""
    return unittest.skipUnless(torch.cuda.device_count() > 1, '''test requires multiple GPUs''' )(test_case)


def require_multi_xpu(test_case):
    """Decorator: run only when more than one XPU is available."""
    return unittest.skipUnless(torch.xpu.device_count() > 1, '''test requires multiple XPUs''' )(test_case)


def require_safetensors(test_case):
    """Decorator: run only when safetensors is installed."""
    return unittest.skipUnless(is_safetensors_available(), '''test requires safetensors''' )(test_case)


def require_deepspeed(test_case):
    """Decorator: run only when DeepSpeed is installed."""
    return unittest.skipUnless(is_deepspeed_available(), '''test requires DeepSpeed''' )(test_case)


def require_fsdp(test_case):
    """Decorator: run only on torch >= 1.12.0 (minimum version for FSDP support)."""
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ), '''test requires torch version >= 1.12.0''' )(test_case)
def require_torch_min_version(test_case=None, version=None):
    """Decorator: run only when the installed torch is at least `version`.

    Usable bare or parameterized, e.g. ``@require_torch_min_version(version="1.12")``.
    """
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('''>=''' , version), f'test requires torch version >= {version}' )(test_case)


def require_tensorboard(test_case):
    """Decorator: run only when tensorboard is installed."""
    return unittest.skipUnless(is_tensorboard_available(), '''test requires Tensorboard''' )(test_case)


def require_wandb(test_case):
    """Decorator: run only when wandb is installed."""
    return unittest.skipUnless(is_wandb_available(), '''test requires wandb''' )(test_case)


def require_comet_ml(test_case):
    """Decorator: run only when comet_ml is installed."""
    return unittest.skipUnless(is_comet_ml_available(), '''test requires comet_ml''' )(test_case)


# True when at least one tracker (wandb/tensorboard) is installed AND comet_ml
# is absent — comet_ml hijacks the tracking backends when present.
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    """Decorator: requires a usable tracker and for comet_ml to not be installed."""
    return unittest.skipUnless(
        _atleast_one_tracker_available, '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(test_case)
class TempDirTestCase(unittest.TestCase):
    """TestCase with a class-scoped temporary directory.

    The directory is created once per class, emptied before each test when
    `clear_on_setup` is True, and removed when the class finishes.
    """

    # When True (default), setUp wipes the temp dir's contents before each test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('''**/*'''):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """TestCase that resets the accelerate state singletons after each test so
    tests cannot leak device/process configuration into each other."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """TestCase helper that starts mocks and registers their teardown."""

    def add_mocks(self, mocks):
        """Start `mocks` (a single mock or a list/tuple) and stop them on cleanup."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Gather `tensor` across all processes and check every copy equals the local one.

    Returns:
        True when all gathered copies match, False otherwise.
    """
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class lowercase :
def __init__( self ,A__ ,A__ ,A__):
lowercase = returncode
lowercase = stdout
lowercase = stderr
async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
while True:
lowercase = await stream.readline()
if line:
callback(lowerCAmelCase__ )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run `cmd` asynchronously, teeing stdout/stderr line-by-line.

    Returns:
        _RunOutput with the exit code and the captured stdout/stderr lines.
    """
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line)
        if not quiet:
            print(line, label, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='''stdout:''' ))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='''stderr:''' ))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Blocking wrapper around `_stream_subprocess`.

    Raises:
        RuntimeError: when the command exits with a non-zero return code,
        including the combined stderr in the message.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}' )
    return result
class SubprocessCallException(Exception):
    """Raised by `run_command` when the subprocess exits with a non-zero status."""

    pass
def run_command(command, return_stdout=False):
    """Run `command` (a list of args), optionally returning its decoded stdout.

    Raises:
        SubprocessCallException: on non-zero exit, with the captured output.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}' ) from e
# ---
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import structure: submodule name -> public names it exports.
# BUG FIX: this dict was assigned to a throwaway name although `_import_structure`
# is what _LazyModule below consumes; the optional branches must append to it.
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy loader so heavy deps import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer(SeqaSeqTrainer):
    """Seq2seq Trainer specialized for question answering.

    Runs generation during evaluation/prediction and converts the raw model
    output into metric inputs via a user-supplied ``post_process_function``.

    The obfuscated original named both overrides ``A__`` (so the second
    definition shadowed the first and the Trainer ``evaluate``/``predict``
    API was broken) and referenced unbound names such as ``gen_kwargs``;
    this restores the standard method names and local bindings.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (un-tokenized) examples matching the eval dataset; needed to
        # reconstruct answer texts during post-processing.
        self.eval_examples = eval_examples
        # Callable(examples, features, output[, stage]) -> predictions.
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset=None,
        eval_examples=None,
        ignore_keys=None,
        metric_key_prefix="eval",
        **gen_kwargs,
    ):
        """Evaluate with generation and return the (prefixed) metrics dict."""
        gen_kwargs = gen_kwargs.copy()
        # Fall back to the generation settings configured on the training args.
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                # No point gathering predictions if there are no metrics to compute.
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the metric fn, even if the loop raised.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default.
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node logs the results by default.
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        """Predict with generation; returns a PredictionOutput with metrics."""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        # Without post-processing/metrics there is nothing more to derive.
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 633 | 0 |
class OverFlowError(Exception):
    """Raised when a bounded queue exceeds its maximum capacity.

    The obfuscated original named this class ``lowercase`` while the queue
    code below raises ``OverFlowError`` — restoring the referenced name.
    """
class UnderFlowError(Exception):
    """Raised when dequeueing from an empty queue.

    The obfuscated original named this class ``lowercase`` while the queue
    code below raises ``UnderFlowError`` — restoring the referenced name.
    """
class FixedPriorityQueue:
    """Priority queue with three fixed priority levels (0 = highest).

    Each level is a FIFO list holding at most 100 items; ``dequeue`` pops
    the oldest item from the highest-priority non-empty level.
    """

    def __init__(self):
        # One FIFO list per priority level 0, 1, 2.
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Add ``data`` at ``priority`` (0-2).

        Raises:
            ValueError: if ``priority`` is out of range.
            OverflowError: if the chosen level already holds 100 items.
        """
        # Explicit range check: merely indexing `self.queues[priority]` would
        # let negative priorities slip through via negative indexing.
        if not 0 <= priority < len(self.queues):
            raise ValueError("Valid priorities are 0, 1, and 2")
        if len(self.queues[priority]) >= 100:
            raise OverflowError("Maximum queue size is 100")
        self.queues[priority].append(data)

    def dequeue(self):
        """Remove and return the oldest item of the highest non-empty level."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """Priority queue in which the element's own value is its priority:
    the smallest element dequeues first.

    Holds at most 100 items; ``dequeue`` is an O(n) min-scan plus removal.
    """

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        """Append ``data``; raises OverFlowError once 100 items are held."""
        # `>=` rather than `==` so the bound still holds even if the list
        # was ever grown out-of-band.
        if len(self.queue) >= 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        """Remove and return the smallest element; UnderFlowError if empty."""
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue():
    """Demo driver for FixedPriorityQueue.

    Enqueues nine items across the three levels, prints the queue, then
    drains it. Note: ten dequeues follow nine enqueues, so the final call
    intentionally raises UnderFlowError once the queues are empty.
    """
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue():
    """Demo driver for ElementPriorityQueue.

    Enqueues nine values, prints the queue, then drains it smallest-first.
    Ten dequeues follow nine enqueues, so the final call intentionally
    raises UnderFlowError once the queue is empty.
    """
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
# Run both priority-queue demos when executed as a script.
if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
| 716 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    """Build the argument parser for `accelerate tpu-config`.

    When ``subparsers`` is provided (invocation from the main accelerate
    CLI), the command is registered there; otherwise a standalone parser
    is returned. The obfuscated original left the parameter and several
    argument defaults (`type`, `default`, `func`) as unbound names.
    """
    description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )
    if subparsers is not None:
        # Dispatch to the launcher when invoked through the main CLI.
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    """Run the configured startup commands on a TPU pod via `gcloud ... ssh`.

    Fills unset CLI options from the accelerate config file, assembles the
    command string, and either prints it (``--debug``) or executes it with
    subprocess. The obfuscated original collapsed every assignment into a
    throwaway `lowercase` name, so `defaults` and the updates to
    ``args.command`` were lost; this restores them.
    """
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    # Resolve the accelerate version spec to a pip-installable target.
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'Running {" ".join(cmd)}')
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    """Standalone entry point: parse CLI args and launch the TPU setup."""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 633 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ :Tuple = logging.get_logger(__name__)
# Expected vocabulary filename inside a pretrained checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Download locations of the SentencePiece model for each published ALBERT
# checkpoint. The tokenizer class below reads these names as class attributes,
# so the obfuscated `lowercase__` bindings left them undefined.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

# Maximum input length (in tokens) each checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's marker for a word-initial piece (U+2581 LOWER ONE EIGHTH BLOCK).
SPIECE_UNDERLINE = "▁"
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """SentencePiece-based tokenizer (ALBERT-style, judging by the vocab maps
    above and the `[CLS]`/`[SEP]`/`[MASK]` defaults).

    NOTE(review): identifiers here look machine-mangled — every parameter is
    `A__` and every local binding is `lowercase`, so several reads (e.g.
    `mask_token`, `sp_model_kwargs`, `do_lower_case`, `vocab`, `pieces`)
    reference names that are never bound in this text. Confirm against the
    upstream AlbertTokenizer before relying on runtime behavior.
    """
    # Class-level tokenizer configuration; values come from module constants
    # (presumably VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
    # PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — verify they are defined above).
    lowercase_ : Optional[int] =VOCAB_FILES_NAMES
    lowercase_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
    lowercase_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self ,A__ ,A__=True ,A__=True ,A__=False ,A__="[CLS]" ,A__="[SEP]" ,A__="<unk>" ,A__="[SEP]" ,A__="<pad>" ,A__="[CLS]" ,A__="[MASK]" ,A__ = None ,**A__ ,):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        lowercase = (
            AddedToken(A__ ,lstrip=A__ ,rstrip=A__ ,normalized=A__)
            if isinstance(A__ ,A__)
            else mask_token
        )
        # Keyword arguments forwarded to the SentencePiece processor.
        lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=A__ ,remove_space=A__ ,keep_accents=A__ ,bos_token=A__ ,eos_token=A__ ,unk_token=A__ ,sep_token=A__ ,pad_token=A__ ,cls_token=A__ ,mask_token=A__ ,sp_model_kwargs=self.sp_model_kwargs ,**A__ ,)
        lowercase = do_lower_case
        lowercase = remove_space
        lowercase = keep_accents
        lowercase = vocab_file
        # Load the SentencePiece model from the given vocab file.
        lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(A__)
    # Vocabulary size == number of pieces in the SentencePiece model.
    @property
    def A__ ( self):
        return len(self.sp_model)
    # Full token->id mapping, including tokens added after training.
    def A__ ( self):
        lowercase = {self.convert_ids_to_tokens(A__): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    # Pickling support: the SentencePiece processor itself is not picklable,
    # so it is dropped here and re-loaded in __setstate__.
    def __getstate__( self):
        lowercase = self.__dict__.copy()
        lowercase = None
        return state
    def __setstate__( self ,A__):
        lowercase = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs'''):
            lowercase = {}
        lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    # Normalize raw text before piece encoding: collapse whitespace, unify
    # quote styles, optionally strip accents and lowercase.
    def A__ ( self ,A__):
        if self.remove_space:
            lowercase = ''' '''.join(inputs.strip().split())
        else:
            lowercase = inputs
        lowercase = outputs.replace('''``''' ,'''"''').replace('''\'\'''' ,'''"''')
        if not self.keep_accents:
            lowercase = unicodedata.normalize('''NFKD''' ,A__)
            lowercase = ''''''.join([c for c in outputs if not unicodedata.combining(A__)])
        if self.do_lower_case:
            lowercase = outputs.lower()
        return outputs
    # Tokenize text into sentence pieces, splitting trailing-comma digit
    # pieces (e.g. "9," ) into separate number and comma pieces.
    def A__ ( self ,A__):
        lowercase = self.preprocess_text(A__)
        lowercase = self.sp_model.encode(A__ ,out_type=A__)
        lowercase = []
        for piece in pieces:
            if len(A__) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(A__ ,''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        lowercase = cur_pieces[1:]
                    else:
                        lowercase = cur_pieces[0][1:]
                    cur_pieces.append(piece[-1])
                    new_pieces.extend(A__)
            else:
                new_pieces.append(A__)
        return new_pieces
    # Token string -> vocabulary id.
    def A__ ( self ,A__):
        return self.sp_model.PieceToId(A__)
    # Vocabulary id -> token string.
    def A__ ( self ,A__):
        return self.sp_model.IdToPiece(A__)
    # Join pieces back into a plain string, decoding runs of ordinary pieces
    # with SentencePiece while passing special tokens through verbatim.
    def A__ ( self ,A__):
        lowercase = []
        lowercase = ''''''
        lowercase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(A__) + token
                lowercase = True
                lowercase = []
            else:
                current_sub_tokens.append(A__)
                lowercase = False
        out_string += self.sp_model.decode(A__)
        return out_string.strip()
    # Build model inputs: [CLS] A [SEP] for one sequence,
    # [CLS] A [SEP] B [SEP] for a pair.
    def A__ ( self ,A__ ,A__ = None):
        lowercase = [self.sep_token_id]
        lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    # 1 marks special tokens, 0 marks sequence tokens.
    def A__ ( self ,A__ ,A__ = None ,A__ = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A__ ,token_ids_a=A__ ,already_has_special_tokens=A__)
        if token_ids_a is not None:
            return [1] + ([0] * len(A__)) + [1] + ([0] * len(A__)) + [1]
        return [1] + ([0] * len(A__)) + [1]
    # Segment ids: 0 for the first sequence (incl. its specials), 1 for the second.
    def A__ ( self ,A__ ,A__ = None):
        lowercase = [self.sep_token_id]
        lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
    # Persist the SentencePiece model into `save_directory`, either by copying
    # the original file or by serializing the in-memory model.
    def A__ ( self ,A__ ,A__ = None):
        if not os.path.isdir(A__):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        lowercase = os.path.join(
            A__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(A__) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file ,A__)
        elif not os.path.isfile(self.vocab_file):
            with open(A__ ,'''wb''') as fi:
                lowercase = self.sp_model.serialized_model_proto()
                fi.write(A__)
        return (out_vocab_file,)
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Token boundary pattern: anything that is not a letter, digit or underscore.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10  # files with fewer tokens are skipped by get_min_hash
NUM_PERM = 256  # number of MinHash permutations
def get_min_hash(tokens):
    """Compute the MinHash signature of a list of tokens.

    Returns None when the file is too short (< MIN_NUM_TOKENS tokens) to be
    worth deduplicating.
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    # Hash the *set* of tokens: duplicates carry no extra signal.
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Split source text on non-alphanumeric characters into a set of tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """Incremental near-duplicate index built on MinHash LSH.

    Each added file is queried against the LSH index first; any hit attaches
    the file to an existing duplicate cluster (keyed by its first-seen base
    element), otherwise the file becomes a potential base itself.
    """

    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # base element -> set of near-duplicate elements. The obfuscated
        # original had `defaultdict(A__)`; values are used as sets below.
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash):
        """Insert ``(code_key, min_hash)`` and update the duplicate clusters.

        ``code_key`` is a (index, repo_name, path) triple.
        """
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # None of the hits is a known base: start a new cluster under
                # the first hit.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        """Return the clusters as lists of {base_index, repo_name, path} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath):
        """Dump the duplicate clusters to ``filepath`` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Pool worker: hash one (index, row) pair.

    Returns ((index, repo_name, path), MinHash), or None implicitly for
    files too short to hash.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield (key, MinHash) pairs for every row, computed in a process pool.

    Rows that are too short to hash (worker returned None) are dropped.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=1_0000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Index every row of the dataset and return its near-duplicate clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2):
    """Jaccard similarity of the token sets of two code strings.

    The obfuscated original intersected a token set with itself
    (`tokensa & tokensa`), which always yields similarity 1.0; this
    compares the two inputs as intended.
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
lowercase__ :List[Any] = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one cluster to its 'extremes'.

    An element joins the extremes list only if it is not within
    ``jaccard_threshold`` similarity of an element already kept; otherwise
    the matching extreme's ``copies`` count is incremented. Reads the
    dataset through the module-level ``_shared_dataset``.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            # No existing extreme is close enough: keep this element.
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every cluster in parallel.

    Publishes ``dataset`` through the module-level ``_shared_dataset`` so
    forked pool workers can read it without pickling it per task.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    work = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                work,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Remove near-duplicates from ``dataset``, keeping one 'extreme' per cluster.

    Returns the filtered dataset and the duplicate clusters (each element
    annotated with ``is_extreme`` and, for extremes, its ``copies`` count).
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # Drop every duplicate that is not the kept extreme of its cluster.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 633 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.