| code (string, 81 to 54k chars) | code_codestyle (int64, 0 to 721) | style_context (string, 91 to 41.9k chars) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
lowercase__ :List[Any] = logging.get_logger(__name__)
lowercase__ :Dict = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class lowercase :
def __init__( self ,A__=None ,**A__):
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''')
lowercase = model
lowercase = kwargs.get('''model_save_dir''' ,A__)
lowercase = kwargs.get('''latest_model_name''' ,A__)
def __call__( self ,**A__):
lowercase = {k: np.array(A__) for k, v in kwargs.items()}
return self.model.run(A__ ,A__)
@staticmethod
def A__ ( A__ ,A__=None ,A__=None):
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''')
lowercase = '''CPUExecutionProvider'''
return ort.InferenceSession(A__ ,providers=[provider] ,sess_options=A__)
def A__ ( self ,A__ ,A__ = None ,**A__):
lowercase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowercase = self.model_save_dir.joinpath(self.latest_model_name)
lowercase = Path(A__).joinpath(A__)
try:
shutil.copyfile(A__ ,A__)
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowercase = self.model_save_dir.joinpath(A__)
if src_path.exists():
lowercase = Path(A__).joinpath(A__)
try:
shutil.copyfile(A__ ,A__)
except shutil.SameFileError:
pass
def A__ ( self ,A__ ,**A__ ,):
if os.path.isfile(A__):
logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
return
os.makedirs(A__ ,exist_ok=A__)
# saving model weights/files
self._save_pretrained(A__ ,**A__)
@classmethod
def A__ ( cls ,A__ ,A__ = None ,A__ = None ,A__ = False ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,**A__ ,):
lowercase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(A__):
lowercase = OnnxRuntimeModel.load_model(
os.path.join(A__ ,A__) ,provider=A__ ,sess_options=A__)
lowercase = Path(A__)
# load model from hub
else:
# download model
lowercase = hf_hub_download(
repo_id=A__ ,filename=A__ ,use_auth_token=A__ ,revision=A__ ,cache_dir=A__ ,force_download=A__ ,)
lowercase = Path(A__).parent
lowercase = Path(A__).name
lowercase = OnnxRuntimeModel.load_model(A__ ,provider=A__ ,sess_options=A__)
return cls(model=A__ ,**A__)
@classmethod
def A__ ( cls ,A__ ,A__ = True ,A__ = None ,A__ = None ,**A__ ,):
lowercase = None
if len(str(A__).split('''@''')) == 2:
lowercase , lowercase = model_id.split('''@''')
return cls._from_pretrained(
model_id=A__ ,revision=A__ ,cache_dir=A__ ,force_download=A__ ,use_auth_token=A__ ,**A__ ,)
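# A minimal sketch of the pattern implemented by the wrapper above, kept separate
# for readability: open an onnxruntime session on CPU, cast keyword arguments to
# numpy arrays, and run the graph. The function name and input names are
# illustrative, not taken from the snippet.
def run_onnx_model(model_path, **inputs):
    session = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    feeds = {name: np.array(value) for name, value in inputs.items()}
    return session.run(None, feeds)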
| 718 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Union[str, Any] =CLIPConfig
lowercase_ : str =['''CLIPEncoderLayer''']
def __init__( self ,A__):
super().__init__(A__)
lowercase = CLIPVisionModelWithProjection(config.vision_config)
lowercase = nn.Linear(config.vision_config.projection_dim ,1)
lowercase = nn.Linear(config.vision_config.projection_dim ,1)
@torch.no_grad()
def A__ ( self ,A__ ,A__ ,A__=0.5 ,A__=0.5):
lowercase = self.vision_model(A__)[0]
lowercase = self.p_head(A__)
lowercase = nsfw_detected.flatten()
lowercase = nsfw_detected > p_threshold
lowercase = nsfw_detected.tolist()
if any(A__):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''')
for idx, nsfw_detected_ in enumerate(A__):
if nsfw_detected_:
lowercase = np.zeros(images[idx].shape)
lowercase = self.w_head(A__)
lowercase = watermark_detected.flatten()
lowercase = watermark_detected > w_threshold
lowercase = watermark_detected.tolist()
if any(A__):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''')
for idx, watermark_detected_ in enumerate(A__):
if watermark_detected_:
lowercase = np.zeros(images[idx].shape)
return images, nsfw_detected, watermark_detected
| 633 | 0 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
lowercase__ :Optional[int] = "src/transformers"
# Matches is_xxx_available()
lowercase__ :Tuple = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowercase__ :int = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase__ :Any = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowercase__ :Optional[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowercase__ :Optional[int] = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase__ :int = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase__ :Dict = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase__ :str = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowercase__ :Union[str, Any] = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowercase__ :List[Any] = re.compile(r"^\s*try:")
# Catches a line with else:
lowercase__ :Optional[Any] = re.compile(r"^\s*else:")
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if _re_test_backend.search(lowerCAmelCase__ ) is None:
return None
lowercase = [b[0] for b in _re_backend.findall(lowerCAmelCase__ )]
backends.sort()
return "_and_".join(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
with open(lowerCAmelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase = f.readlines()
lowercase = 0
while line_index < len(lowerCAmelCase__ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCAmelCase__ ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCAmelCase__ ):
lowercase = _re_one_line_import_struct.search(lowerCAmelCase__ ).groups()[0]
lowercase = re.findall(R'''\[([^\]]+)\]''' , lowerCAmelCase__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowercase = _re_import_struct_key_value.search(lowerCAmelCase__ )
if single_line_import_search is not None:
lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowercase = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowercase = lines[line_index]
if _re_import_struct_add_one.search(lowerCAmelCase__ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCAmelCase__ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCAmelCase__ ) is not None:
lowercase = _re_import_struct_add_many.search(lowerCAmelCase__ ).groups()[0].split(''', ''' )
lowercase = [obj[1:-1] for obj in imports if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif _re_between_brackets.search(lowerCAmelCase__ ) is not None:
lowercase = _re_between_brackets.search(lowerCAmelCase__ ).groups()[0].split(''', ''' )
lowercase = [obj[1:-1] for obj in imports if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif _re_quote_object.search(lowerCAmelCase__ ) is not None:
objects.append(_re_quote_object.search(lowerCAmelCase__ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase = []
while (
line_index < len(lowerCAmelCase__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowercase = lines[line_index]
lowercase = _re_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCAmelCase__ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowercase = lines[line_index]
lowercase = _re_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
def find_duplicates(lowerCAmelCase__ ):
return [k for k, v in collections.Counter(lowerCAmelCase__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase = []
for key in import_dict_objects.keys():
lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}' )
lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase = '''base imports''' if key == '''none''' else f'{key} backend'
errors.append(f'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = []
for root, _, files in os.walk(lowerCAmelCase__ ):
if "__init__.py" in files:
lowercase = os.path.join(lowerCAmelCase__ , '''__init__.py''' )
lowercase = parse_init(lowerCAmelCase__ )
if objects is not None:
lowercase = analyze_results(*lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
lowercase = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('''\n'''.join(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) > 0:
raise ValueError('''\n\n'''.join(lowerCAmelCase__ ) )
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = []
for path, directories, files in os.walk(lowerCAmelCase__ ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(lowerCAmelCase__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCAmelCase__ ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowercase = str((Path(lowerCAmelCase__ ) / folder).relative_to(lowerCAmelCase__ ) )
lowercase = short_path.replace(os.path.sep , '''.''' )
submodules.append(lowerCAmelCase__ )
for fname in files:
if fname == "__init__.py":
continue
lowercase = str((Path(lowerCAmelCase__ ) / fname).relative_to(lowerCAmelCase__ ) )
lowercase = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(lowerCAmelCase__ )
return submodules
lowercase__ :int = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def UpperCamelCase ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
lowercase = direct_transformers_import(lowerCAmelCase__ )
lowercase = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentially re-)add them.
with open(os.path.join(lowerCAmelCase__ , '''__init__.py''' ) , '''r''' ) as f:
lowercase = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , lowerCAmelCase__ ) ) )
lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowerCAmelCase__ ) > 0:
lowercase = '''\n'''.join(f'- {module}' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
f'{list_of_modules}\n'
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 719 |
class lowercase :
def __init__( self ,A__):
lowercase = val
lowercase = None
lowercase = None
def A__ ( self ,A__):
if self.val:
if val < self.val:
if self.left is None:
lowercase = Node(A__)
else:
self.left.insert(A__)
elif val > self.val:
if self.right is None:
lowercase = Node(A__)
else:
self.right.insert(A__)
else:
lowercase = val
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
# Recursive traversal
if root:
inorder(root.left , lowerCAmelCase__ )
res.append(root.val )
inorder(root.right , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
# Build BST
if len(lowerCAmelCase__ ) == 0:
return arr
lowercase = Node(arr[0] )
for i in range(1 , len(lowerCAmelCase__ ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowercase = []
inorder(lowerCAmelCase__ , lowerCAmelCase__ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
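# A readable sketch of the same tree-sort idea with descriptive names; it is
# purely illustrative. As in the snippet above, duplicate values are silently
# dropped.
class BSTNode:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

    def insert(self, value):
        if value < self.value:
            if self.left is None:
                self.left = BSTNode(value)
            else:
                self.left.insert(value)
        elif value > self.value:
            if self.right is None:
                self.right = BSTNode(value)
            else:
                self.right.insert(value)

def inorder_collect(node, result):
    # Left subtree, node, right subtree: yields values in ascending order.
    if node is not None:
        inorder_collect(node.left, result)
        result.append(node.value)
        inorder_collect(node.right, result)

def tree_sort_readable(values):
    if not values:
        return []
    root = BSTNode(values[0])
    for value in values[1:]:
        root.insert(value)
    result = []
    inorder_collect(root, result)
    return result

# tree_sort_readable([10, 1, 3, 2, 9, 14, 13]) -> [1, 2, 3, 9, 10, 13, 14]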
| 633 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowercase ( nn.Module ):
def __init__( self ,A__ ,A__):
super().__init__()
lowercase = module
lowercase = nn.Sequential(
nn.Linear(module.in_features ,A__ ,bias=A__) ,nn.Linear(A__ ,module.out_features ,bias=A__) ,)
lowercase = (2.0 / (5 * min(module.in_features ,module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight ,std=A__)
nn.init.zeros_(self.adapter[1].weight)
self.adapter.to(module.weight.device)
def A__ ( self ,A__ ,*A__ ,**A__):
return self.module(A__ ,*A__ ,**A__) + self.adapter(A__)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected.
# Therefore here we use only bloom-1b7 to test our module.
lowercase_ : Any ='''bigscience/bloom-1b7'''
# Constant values
lowercase_ : str =2.109_659_552_692_574
lowercase_ : str ='''Hello my name is'''
lowercase_ : Union[str, Any] =set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
lowercase_ : Optional[Any] =10
def A__ ( self):
# Models and tokenizer
lowercase = AutoTokenizer.from_pretrained(self.model_name)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def A__ ( self):
super().setUp()
# Models and tokenizer
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,torch_dtype=torch.floataa ,device_map='''auto''')
lowercase = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=A__ ,device_map='''auto''')
def A__ ( self):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = self.model_abit.config
self.assertTrue(hasattr(A__ ,'''quantization_config'''))
lowercase = config.to_dict()
lowercase = config.to_diff_dict()
lowercase = config.to_json_string()
def A__ ( self):
from bitsandbytes.nn import Paramsabit
lowercase = self.model_fpaa.get_memory_footprint()
lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit ,self.EXPECTED_RELATIVE_DIFFERENCE)
lowercase = get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def A__ ( self):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A__ ,torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def A__ ( self):
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''')
lowercase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0) ,max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_sequences[0] ,skip_special_tokens=A__) ,self.EXPECTED_OUTPUTS)
def A__ ( self):
lowercase = BitsAndBytesConfig()
lowercase = True
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,quantization_config=A__ ,device_map='''auto''')
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''')
lowercase = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0) ,max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_sequences[0] ,skip_special_tokens=A__) ,self.EXPECTED_OUTPUTS)
def A__ ( self):
with self.assertRaises(A__), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A__)
def A__ ( self):
lowercase = BitsAndBytesConfig()
with self.assertRaises(A__):
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,quantization_config=A__ ,load_in_abit=A__ ,device_map='''auto''' ,bnb_abit_quant_type='''nf4''' ,)
def A__ ( self):
with self.assertRaises(A__):
# Tries with `str`
self.model_abit.to('''cpu''')
with self.assertRaises(A__):
# Tries with a `dtype``
self.model_abit.to(torch.floataa)
with self.assertRaises(A__):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0'''))
with self.assertRaises(A__):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A__):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''')
lowercase = self.model_fpaa.to(torch.floataa)
lowercase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0) ,max_new_tokens=1_0)
# Check this does not throw an error
lowercase = self.model_fpaa.to('''cpu''')
# Check this does not throw an error
lowercase = self.model_fpaa.half()
# Check this does not throw an error
lowercase = self.model_fpaa.float()
def A__ ( self):
lowercase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' ,load_in_abit=A__ ,device_map='''auto''')
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowercase ( unittest.TestCase ):
@classmethod
def A__ ( cls):
lowercase = '''t5-small'''
lowercase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowercase = AutoTokenizer.from_pretrained(cls.model_name)
lowercase = '''Translate in German: Hello, my dog is cute'''
def A__ ( self):
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
from transformers import TaForConditionalGeneration
lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
lowercase = None
# test with `t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(self.model_name ,load_in_abit=A__ ,device_map='''auto''')
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''').to(0)
lowercase = model.generate(**A__)
# test with `flan-t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name ,load_in_abit=A__ ,device_map='''auto''')
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''').to(0)
lowercase = model.generate(**A__)
lowercase = modules
def A__ ( self):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(self.model_name ,load_in_abit=A__ ,device_map='''auto''')
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q ,bnb.nn.Linearabit))
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''').to(0)
lowercase = model.generate(**A__)
# test with `flan-t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name ,load_in_abit=A__ ,device_map='''auto''')
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''').to(0)
lowercase = model.generate(**A__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def A__ ( self):
super().setUp()
# model_name
lowercase = '''bigscience/bloom-560m'''
lowercase = '''t5-small'''
# Different types of model
lowercase = AutoModel.from_pretrained(self.model_name ,load_in_abit=A__ ,device_map='''auto''')
# Sequence classification model
lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name ,load_in_abit=A__ ,device_map='''auto''')
# CausalLM model
lowercase = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=A__ ,device_map='''auto''')
# Seq2seq model
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name ,load_in_abit=A__ ,device_map='''auto''')
def A__ ( self):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def A__ ( self):
super().setUp()
def A__ ( self):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = pipeline(
'''text-generation''' ,model=self.model_name ,model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} ,max_new_tokens=self.MAX_NEW_TOKENS ,)
# Real second forward pass
lowercase = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['''generated_text'''] ,self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def A__ ( self):
super().setUp()
def A__ ( self):
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,load_in_abit=A__ ,device_map='''balanced''')
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) ,{0, 1})
# Check that inference pass works on the model
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''')
# Second real batch
lowercase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0) ,max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_parallel[0] ,skip_special_tokens=A__) ,self.EXPECTED_OUTPUTS)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def A__ ( self):
lowercase = '''facebook/opt-350m'''
super().setUp()
def A__ ( self):
if version.parse(importlib.metadata.version('''bitsandbytes''')) < version.parse('''0.37.0'''):
return
# Step 1: freeze all parameters
lowercase = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=A__)
self.assertEqual(set(model.hf_device_map.values()) ,{torch.cuda.current_device()})
for param in model.parameters():
lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowercase = param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A__)):
lowercase = LoRALayer(module.q_proj ,rank=1_6)
lowercase = LoRALayer(module.k_proj ,rank=1_6)
lowercase = LoRALayer(module.v_proj ,rank=1_6)
# Step 3: dummy batch
lowercase = self.tokenizer('''Test batch ''' ,return_tensors='''pt''').to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowercase = model.forward(**A__)
out.logits.norm().backward()
for module in model.modules():
if isinstance(A__ ,A__):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(A__ ,nn.Embedding):
self.assertTrue(module.weight.grad is None)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Any ='''gpt2-xl'''
lowercase_ : Union[str, Any] =3.3_191_854_854_152_187
| 720 |
import os
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
lowercase = [
[int(lowerCAmelCase__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase = len(lowerCAmelCase__ )
lowercase = len(matrix[0] )
lowercase = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
lowercase = matrix[i][0]
for j in range(1 , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
lowercase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase__ ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
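# A readable sketch of the same three-pass dynamic programme on an in-memory
# matrix (moves allowed: right, up and down); names and the small example
# matrix are illustrative only.
def min_path_sum(matrix):
    rows, cols = len(matrix), len(matrix[0])
    best = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        best[i][0] = matrix[i][0]
    for j in range(1, cols):
        # step right from the previous column
        for i in range(rows):
            best[i][j] = best[i][j - 1] + matrix[i][j]
        # relax downwards (coming from the cell above)
        for i in range(1, rows):
            best[i][j] = min(best[i][j], best[i - 1][j] + matrix[i][j])
        # relax upwards (coming from the cell below)
        for i in range(rows - 2, -1, -1):
            best[i][j] = min(best[i][j], best[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in best)

# min_path_sum([[131, 673, 234], [201, 96, 342], [630, 803, 746]]) -> 639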
| 633 | 0 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self):
lowercase = []
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_init_end''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_train_begin''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_train_end''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_epoch_begin''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_epoch_end''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_step_begin''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_step_end''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_evaluate''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_predict''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_save''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_log''')
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
self.events.append('''on_prediction_step''')
@require_torch
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = tempfile.mkdtemp()
def A__ ( self):
shutil.rmtree(self.output_dir)
def A__ ( self ,A__=0 ,A__=0 ,A__=6_4 ,A__=6_4 ,A__=None ,A__=False ,**A__):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
lowercase = RegressionDataset(length=A__)
lowercase = RegressionDataset(length=A__)
lowercase = RegressionModelConfig(a=A__ ,b=A__)
lowercase = RegressionPreTrainedModel(A__)
lowercase = TrainingArguments(self.output_dir ,disable_tqdm=A__ ,report_to=[] ,**A__)
return Trainer(
A__ ,A__ ,train_dataset=A__ ,eval_dataset=A__ ,callbacks=A__ ,)
def A__ ( self ,A__ ,A__):
self.assertEqual(len(A__) ,len(A__))
# Order doesn't matter
lowercase = sorted(A__ ,key=lambda A__: cb.__name__ if isinstance(A__ ,A__) else cb.__class__.__name__)
lowercase = sorted(A__ ,key=lambda A__: cb.__name__ if isinstance(A__ ,A__) else cb.__class__.__name__)
for cba, cba in zip(A__ ,A__):
if isinstance(A__ ,A__) and isinstance(A__ ,A__):
self.assertEqual(A__ ,A__)
elif isinstance(A__ ,A__) and not isinstance(A__ ,A__):
self.assertEqual(A__ ,cba.__class__)
elif not isinstance(A__ ,A__) and isinstance(A__ ,A__):
self.assertEqual(cba.__class__ ,A__)
else:
self.assertEqual(A__ ,A__)
def A__ ( self ,A__):
lowercase = ['''on_init_end''', '''on_train_begin''']
lowercase = 0
lowercase = len(trainer.get_eval_dataloader())
lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader()) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs):
expected_events.append('''on_epoch_begin''')
for _ in range(A__):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''')
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''')
expected_events.append('''on_epoch_end''')
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def A__ ( self):
lowercase = self.get_trainer()
lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
# Callbacks passed at init are added to the default callbacks
lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback])
expected_callbacks.append(A__)
self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
lowercase = self.get_trainer(disable_tqdm=A__)
lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
def A__ ( self):
lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(A__)
expected_callbacks.remove(A__)
self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
lowercase = self.get_trainer()
lowercase = trainer.pop_callback(A__)
self.assertEqual(cb.__class__ ,A__)
self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
trainer.add_callback(A__)
expected_callbacks.insert(0 ,A__)
self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
# We can also add, pop, or remove by instance
lowercase = self.get_trainer()
lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(A__)
expected_callbacks.remove(A__)
self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
lowercase = self.get_trainer()
lowercase = trainer.callback_handler.callbacks[0]
lowercase = trainer.pop_callback(A__)
self.assertEqual(A__ ,A__)
self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
trainer.add_callback(A__)
expected_callbacks.insert(0 ,A__)
self.check_callbacks_equality(trainer.callback_handler.callbacks ,A__)
def A__ ( self):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' ,category=A__)
lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback])
trainer.train()
lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ ,self.get_expected_events(A__))
# Independent log/save/eval
lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5)
trainer.train()
lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ ,self.get_expected_events(A__))
lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5)
trainer.train()
lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ ,self.get_expected_events(A__))
lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''')
trainer.train()
lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ ,self.get_expected_events(A__))
lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''')
trainer.train()
lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ ,self.get_expected_events(A__))
# A bit of everything
lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ ,self.get_expected_events(A__))
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''') as warn_mock:
lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(A__) in warn_mock.call_args[0][0]
| 721 |
from __future__ import annotations
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if len(lowerCAmelCase__ ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
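# A readable sketch of the same check with descriptive names (illustrative
# only): a set of side lengths can close into a polygon exactly when the
# longest side is strictly shorter than the sum of all the others.
def can_form_polygon(sides):
    if len(sides) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(side <= 0 for side in sides):
        raise ValueError("All values must be greater than 0")
    ordered = sorted(sides)
    return ordered[-1] < sum(ordered[:-1])

# can_form_polygon([3, 4, 5]) -> True, can_form_polygon([1, 2, 10]) -> False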
| 633 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowercase__ :Tuple = numpy.array([0, 0])
lowercase__ :Tuple = numpy.array([0.5, 0.8_660_254])
lowercase__ :Optional[Any] = numpy.array([1, 0])
lowercase__ :str = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = initial_vectors
for _ in range(lowerCAmelCase__ ):
lowercase = iteration_step(lowerCAmelCase__ )
return vectors
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
lowercase = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = numpy.radians(lowerCAmelCase__ )
lowercase , lowercase = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowercase , lowercase = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ :Tuple = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
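# A self-contained sketch of the 2-D rotation used by rotate() above: multiply
# by the standard rotation matrix. Rotating the unit x-vector by 60 degrees
# gives approximately (0.5, 0.8660254), which is how VECTOR_2 is obtained.
def rotate_vector(vector, angle_in_degrees):
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)

# rotate_vector(numpy.array([1, 0]), 60) -> array([0.5, 0.8660254]) (approx.)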
| 700 |
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) == 0 )
def UpperCamelCase ( ):
'''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ :Dict = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Any = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowercase__ :List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ :Tuple = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowercase__ :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
def UpperCamelCase ( lowerCAmelCase__ = 10 ):
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or n < 0:
raise ValueError('''Invalid input''' )
lowercase = 10**n
lowercase = 2_8433 * (pow(2 , 783_0457 , lowerCAmelCase__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(10) = }')
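# A readable sketch of the same computation: the last `n` digits of
# 28433 * 2**7830457 + 1 (the large non-Mersenne prime from Project Euler
# problem 97), using three-argument pow so the full power is never built.
def last_digits(n=10):
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)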
| 702 |
import logging
from transformers import PretrainedConfig
lowercase__ :int = logging.getLogger(__name__)
lowercase__ :Dict = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Optional[int] ='''bertabs'''
def __init__( self ,A__=3_0_5_2_2 ,A__=5_1_2 ,A__=6 ,A__=5_1_2 ,A__=8 ,A__=5_1_2 ,A__=0.2 ,A__=6 ,A__=7_6_8 ,A__=8 ,A__=2_0_4_8 ,A__=0.2 ,**A__ ,):
super().__init__(**A__)
lowercase = vocab_size
lowercase = max_pos
lowercase = enc_layers
lowercase = enc_hidden_size
lowercase = enc_heads
lowercase = enc_ff_size
lowercase = enc_dropout
lowercase = dec_layers
lowercase = dec_hidden_size
lowercase = dec_heads
lowercase = dec_ff_size
lowercase = dec_dropout
| 633 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def wrapper(*lowerCAmelCase__ , **lowerCAmelCase__ ):
lowercase : Any = timeit.default_timer()
lowercase : List[str] = func(*lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase : Tuple = timeit.default_timer() - starttime
return delta
lowercase : Optional[int] = func.__name__
return wrapper
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=100 , lowerCAmelCase__=None ):
'''simple docstring'''
lowercase : Tuple = []
lowercase : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase__ ):
lowercase : List[Any] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase__ , _ArrayXD ):
lowercase : Union[str, Any] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase__ , datasets.Value ):
if v.dtype == "string":
lowercase : str = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowercase : int = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase__ , datasets.Sequence ):
while isinstance(lowerCAmelCase__ , datasets.Sequence ):
lowercase : List[Any] = v.feature
lowercase : List[Any] = seq_shapes[k]
lowercase : Any = np.random.rand(*lowerCAmelCase__ ).astype(v.dtype )
lowercase : Any = data
dummy_data.append((i, example) )
return dummy_data
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=100 , lowerCAmelCase__=None ):
'''simple docstring'''
lowercase : List[str] = generate_examples(lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes=lowerCAmelCase__ )
with ArrowWriter(features=lowerCAmelCase__ , path=lowerCAmelCase__ ) as writer:
for key, record in dummy_data:
lowercase : str = features.encode_example(lowerCAmelCase__ )
writer.write(lowerCAmelCase__ )
lowercase , lowercase : str = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
lowercase : Dict = datasets.Dataset.from_file(filename=lowerCAmelCase__ , info=datasets.DatasetInfo(features=lowerCAmelCase__ ) )
return dataset
| 703 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 633 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowercase__ :List[Any] = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
'''simple docstring'''
lowercase = XLNetConfig.from_json_file(lowerCAmelCase__ )
lowercase = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
lowercase = finetuning_task
lowercase = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase = XLNetForSequenceClassification(lowerCAmelCase__ )
elif "squad" in finetuning_task:
lowercase = finetuning_task
lowercase = XLNetForQuestionAnswering(lowerCAmelCase__ )
else:
lowercase = XLNetLMHeadModel(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
lowercase = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'Save PyTorch model to {os.path.abspath(lowerCAmelCase__ )}' )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(f'Save configuration file to {os.path.abspath(lowerCAmelCase__ )}' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase__ :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
lowercase__ :Tuple = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 704 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
'''simple docstring'''
if rng is None:
lowercase = random.Random()
lowercase = 1
for dim in shape:
total_dims *= dim
lowercase = []
for _ in range(lowerCAmelCase__ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
lowercase = np.array(lowerCAmelCase__ , dtype=jnp.intaa ).reshape(lowerCAmelCase__ )
return output
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ):
'''simple docstring'''
lowercase = ids_tensor(lowerCAmelCase__ , vocab_size=2 , rng=lowerCAmelCase__ )
# make sure that at least one token is attended to for each batch
lowercase = 1
return attn_mask
@require_flax
class lowercase :
lowercase_ : Any =None
lowercase_ : List[str] =()
def A__ ( self):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowercase = 2
lowercase = inputs['''input_ids'''].shape[-1] // 2
lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
lowercase = jnp.ones_like(A__)
lowercase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowercase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowercase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 0
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase = getattr(A__ ,A__)
lowercase = pt_model_class(A__).eval()
lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
lowercase = flax_model.generate(A__).sequences
lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = True
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 2
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 2
lowercase = 2
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = True
lowercase = max_length
lowercase = 0.8
lowercase = 1_0
lowercase = 0.3
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = max_length
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = max_length
lowercase = 2
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase = attention_mask.at[(0, 0)].set(0)
lowercase = False
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__ ,attention_mask=A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__ ,attention_mask=A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase = attention_mask.at[(0, 0)].set(0)
lowercase = True
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__ ,attention_mask=A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__ ,attention_mask=A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase = attention_mask.at[(0, 0)].set(0)
lowercase = 2
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__ ,attention_mask=A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__ ,attention_mask=A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
lowercase = '''Hello world'''
lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(A__ ,'''do_samples'''):
model.generate(A__ ,do_samples=A__)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(A__ ,'''foo'''):
lowercase = {'''foo''': '''bar'''}
model.generate(A__ ,**A__)
| 633 | 0 |
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError('''Input value must be an \'int\' type''' )
lowercase = 0
while number:
position += 1
number >>= 1
return position
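# Added note (not part of the original sample): the loop above counts how many
# right-shifts it takes to empty the number, i.e. the 1-based position of the
# most-significant set bit. For a positive int this equals Python's n.bit_length():
# 8 == 0b1000 -> 4, 1 -> 1, and 0 skips the loop and returns 0.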
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[str] =(UniPCMultistepScheduler,)
lowercase_ : Tuple =(('''num_inference_steps''', 25),)
def A__ ( self ,**A__):
lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**A__)
return config
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
new_scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase = sample, sample
for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
# copy over dummy past residuals
new_scheduler.set_timesteps(A__)
# copy over dummy past residual (must be after setting timesteps)
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=None ,**A__):
if scheduler is None:
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
return sample
def A__ ( self):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
scheduler.set_timesteps(A__)
elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
lowercase = scheduler.timesteps[5]
lowercase = scheduler.timesteps[6]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
self.assertEqual(output_a.shape ,sample.shape)
self.assertEqual(output_a.shape ,output_a.shape)
def A__ ( self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
lowercase = DEISMultistepScheduler.from_config(scheduler.config)
lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
def A__ ( self):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__)
def A__ ( self):
self.check_over_configs(thresholding=A__)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)
def A__ ( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__)
def A__ ( self):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
lowercase = self.full_loop(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
assert not torch.isnan(A__).any(), "Samples have nan numbers"
def A__ ( self):
self.check_over_configs(lower_order_final=A__)
self.check_over_configs(lower_order_final=A__)
def A__ ( self):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=A__ ,time_step=0)
def A__ ( self):
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
def A__ ( self):
lowercase = self.full_loop(prediction_type='''v_prediction''')
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.1014) < 1E-3
def A__ ( self):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
assert sample.dtype == torch.floataa
def A__ ( self ,**A__):
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = inspect.getfile(accelerate.test_utils)
lowercase = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_script.py'''])
lowercase = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''])
lowercase = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_ops.py'''])
@require_multi_gpu
def A__ ( self):
print(f'Found {torch.cuda.device_count()} devices.')
lowercase = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(A__ ,env=os.environ.copy())
@require_multi_gpu
def A__ ( self):
print(f'Found {torch.cuda.device_count()} devices.')
lowercase = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(f'Command: {cmd}')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(A__ ,env=os.environ.copy())
@require_multi_gpu
def A__ ( self):
lowercase = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(A__ ,env=os.environ.copy())
@require_multi_gpu
def A__ ( self):
print(f'Found {torch.cuda.device_count()} devices, using 2 devices only')
lowercase = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='''0,1'''):
execute_subprocess_async(A__ ,env=os.environ.copy())
if __name__ == "__main__":
lowercase__ :int = Accelerator()
lowercase__ :Optional[int] = (accelerator.state.process_index + 2, 10)
lowercase__ :List[str] = torch.randint(0, 10, shape).to(accelerator.device)
lowercase__ :Any = ""
lowercase__ :List[str] = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowercase__ :Dict = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowercase__ :Tuple = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 706 |
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = defaultdict(lowerCAmelCase__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase = [key for key, value in counts.items() if value > 1]
lowercase = []
for duplicate_key in duplicates:
lowercase = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(lowerCAmelCase__ ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : s["title"].lower() )
def UpperCamelCase ( lowerCAmelCase__=False ):
'''simple docstring'''
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
lowercase = yaml.safe_load(f.read() )
# Get to the API doc
lowercase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase = content[api_idx]['''sections''']
# Then to the model doc
lowercase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowercase = api_doc[model_idx]['''sections''']
lowercase = [(idx, section) for idx, section in enumerate(lowerCAmelCase__ ) if '''sections''' in section]
lowercase = False
for idx, modality_doc in modalities_docs:
lowercase = modality_doc['''sections''']
lowercase = clean_model_doc_toc(lowerCAmelCase__ )
if old_modality_doc != new_modality_doc:
lowercase = True
if overwrite:
lowercase = new_modality_doc
if diff:
if overwrite:
lowercase = model_doc
lowercase = api_doc
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
lowercase__ :Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowercase__ :int = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 633 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowercase :
def __init__( self ,A__ ,A__=1_3 ,A__=7 ,A__=False ,A__=True ,A__=False ,A__=True ,A__=3_3 ,A__=3_2 ,A__=5 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=1_6 ,A__=2 ,A__=0.02 ,A__=3 ,A__=4 ,A__=None ,):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
def A__ ( self):
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length])
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
lowercase = ids_tensor([self.batch_size] ,self.num_choices)
lowercase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = EsmModel(config=A__)
model.to(A__)
model.eval()
lowercase = model(A__ ,attention_mask=A__)
lowercase = model(A__)
lowercase = model(A__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = EsmForMaskedLM(config=A__)
model.to(A__)
model.eval()
lowercase = model(A__ ,attention_mask=A__ ,labels=A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_labels
lowercase = EsmForTokenClassification(config=A__)
model.to(A__)
model.eval()
lowercase = model(A__ ,attention_mask=A__ ,labels=A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels))
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Any =False
lowercase_ : str =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase_ : List[Any] =()
lowercase_ : Optional[Any] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : int =True
def A__ ( self):
lowercase = EsmModelTester(self)
lowercase = ConfigTester(self ,config_class=A__ ,hidden_size=3_7)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__)
@slow
def A__ ( self):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = EsmModel.from_pretrained(A__)
self.assertIsNotNone(A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()[0]
lowercase = EsmEmbeddings(config=A__)
lowercase = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]])
lowercase = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
])
lowercase = create_position_ids_from_input_ids(A__ ,model.padding_idx)
self.assertEqual(position_ids.shape ,expected_positions.shape)
self.assertTrue(torch.all(torch.eq(A__ ,A__)))
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()[0]
lowercase = EsmEmbeddings(config=A__)
lowercase = torch.empty(2 ,4 ,3_0)
lowercase = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowercase = torch.as_tensor([expected_single_positions, expected_single_positions])
lowercase = embeddings.create_position_ids_from_inputs_embeds(A__)
self.assertEqual(position_ids.shape ,expected_positions.shape)
self.assertTrue(torch.all(torch.eq(A__ ,A__)))
@unittest.skip('''Esm does not support embedding resizing''')
def A__ ( self):
pass
@unittest.skip('''Esm does not support embedding resizing''')
def A__ ( self):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def A__ ( self):
pass
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ ):
@slow
def A__ ( self):
with torch.no_grad():
lowercase = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
model.eval()
lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]])
lowercase = model(A__)[0]
lowercase = 3_3
lowercase = torch.Size((1, 6, vocab_size))
self.assertEqual(output.shape ,A__)
lowercase = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
self.assertTrue(torch.allclose(output[:, :3, :3] ,A__ ,atol=1E-4))
@slow
def A__ ( self):
with torch.no_grad():
lowercase = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
model.eval()
lowercase = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]])
lowercase = model(A__)[0]
# compare the actual values for a slice.
lowercase = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]])
self.assertTrue(torch.allclose(output[:, :3, :3] ,A__ ,atol=1E-4))
| 707 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 10**-10 ):
'''simple docstring'''
lowercase = a
while True:
lowercase = Decimal(lowerCAmelCase__ ) - (
Decimal(eval(lowerCAmelCase__ ) ) / Decimal(eval(str(diff(lowerCAmelCase__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(lowerCAmelCase__ ) ) < precision: # noqa: S307
return float(lowerCAmelCase__ )
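# Added note (not part of the original sample): the loop above is the classic
# Newton-Raphson update x_{n+1} = x_n - f(x_n) / f'(x_n), evaluated with Decimal
# for extra precision; f' is obtained symbolically via sympy.diff and iteration
# stops once |f(x_n)| drops below `precision`.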
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find value of e (the root of log(x) - 1 = 0)
print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 633 | 0 |
'''simple docstring'''
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return 10 - x * x
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
    # Bolzano's theorem: a sign change of equation() over [a, b] guarantees a root in between
if equation(lowerCAmelCase__ ) * equation(lowerCAmelCase__ ) >= 0:
raise ValueError('''Wrong space!''' )
lowercase = a
while (b - a) >= 0.01:
# Find middle point
lowercase = (a + b) / 2
# Check if middle point is root
if equation(lowerCAmelCase__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(lowerCAmelCase__ ) * equation(lowerCAmelCase__ ) < 0:
lowercase = c
else:
lowercase = c
return c
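# Added note (illustration only): both example calls below bracket the positive
# root of 10 - x*x, so each prints a value near sqrt(10) ~ 3.162 once the bracket
# width (b - a) has shrunk below 0.01.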
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 708 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase , lowercase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return (gray > 127) & (gray <= 255)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = np.zeros_like(lowerCAmelCase__ )
lowercase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
lowercase = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
lowercase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
lowercase = int(summation > 0 )
return output
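# Added note (not part of the original sample): the nested loops slide the kernel
# over a zero-padded copy of the binary image and set an output pixel whenever a
# set kernel entry lands on a set image pixel, i.e. morphological (binary) dilation:
# white regions grow by roughly the kernel radius.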
if __name__ == "__main__":
# read original image
lowercase__ :str = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
lowercase__ :List[str] = np.array(Image.open(lena_path))
# kernel to be applied
lowercase__ :Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
lowercase__ :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
lowercase__ :str = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
| 633 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = emb.weight.shape
lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
lowercase = emb.weight.data
return lin_layer
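# Added note (not part of the original sample): this helper implements the usual
# weight-tying trick - the shared token-embedding matrix is wrapped in an nn.Linear
# whose weight data is the embedding table, so it can serve as the LM output
# projection without duplicating parameters.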
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
lowercase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
lowercase = mam_aaa['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase = MaMaaaConfig(
vocab_size=lowerCAmelCase__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
lowercase = state_dict['''decoder.embed_tokens.weight''']
lowercase = MaMaaaForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowercase__ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowercase__ :Tuple = parser.parse_args()
lowercase__ :int = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 709 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = emb.weight.shape
lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
lowercase = emb.weight.data
return lin_layer
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
lowercase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
lowercase = mam_aaa['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase = MaMaaaConfig(
vocab_size=lowerCAmelCase__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
lowercase = state_dict['''decoder.embed_tokens.weight''']
lowercase = MaMaaaForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowercase__ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowercase__ :Tuple = parser.parse_args()
lowercase__ :int = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 0 |
lowercase__ :List[str] = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
assert type(lowerCAmelCase__ ) in (int, float) and decimal == int(lowerCAmelCase__ )
lowercase = int(lowerCAmelCase__ )
lowercase = ''''''
lowercase = False
if decimal < 0:
lowercase = True
decimal *= -1
while decimal > 0:
lowercase , lowercase = divmod(lowerCAmelCase__ , 16 )
lowercase = values[remainder] + hexadecimal
lowercase = '''0x''' + hexadecimal
if negative:
lowercase = '''-''' + hexadecimal
return hexadecimal
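# Worked examples (added, illustration only): 255 -> divmod(255, 16) == (15, 15)
# builds "ff", giving "0xff"; -42 sets the negative flag and 42 -> "2a", giving
# "-0x2a" - the same strings Python's built-in hex() returns for non-zero ints.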
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
from __future__ import annotations
from random import random
class lowercase :
def __init__( self ,A__ = None):
lowercase = value
lowercase = random()
lowercase = None
lowercase = None
def __repr__( self):
from pprint import pformat
if self.left is None and self.right is None:
return f'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1)
def __str__( self):
lowercase = str(self.value) + ''' '''
lowercase = str(self.left or '''''')
lowercase = str(self.right or '''''')
return value + left + right
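# Added note (not part of the original sample): a treap keeps binary-search-tree
# order on `value` and heap order on the random `prior`, so split/merge run in
# O(log n) expected time. insert(value) = split at value, then merge(left, new
# node, right); erase(value) = split just below and just above value and merge the
# outer pieces, discarding every node equal to value.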
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowercase , lowercase = split(root.left , lowerCAmelCase__ )
return left, root
else:
lowercase , lowercase = split(root.right , lowerCAmelCase__ )
return root, right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowercase = merge(left.right , lowerCAmelCase__ )
return left
else:
lowercase = merge(lowerCAmelCase__ , right.left )
return right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = Node(lowerCAmelCase__ )
lowercase , lowercase = split(lowerCAmelCase__ , lowerCAmelCase__ )
return merge(merge(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = split(lowerCAmelCase__ , value - 1 )
lowercase , lowercase = split(lowerCAmelCase__ , lowerCAmelCase__ )
return merge(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
lowercase = insert(lowerCAmelCase__ , int(arg[1:] ) )
elif arg[0] == "-":
lowercase = erase(lowerCAmelCase__ , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = None
print(
'''enter numbers to create a tree, + value to add value into treap, '''
'''- value to erase all nodes with value. \'q\' to quit. ''' )
lowercase = input()
while args != "q":
lowercase = interact_treap(lowerCAmelCase__ , lowerCAmelCase__ )
print(lowerCAmelCase__ )
lowercase = input()
    print('''goodbye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 633 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self ,A__ ,A__=3 ,A__=3_2 ,A__=3 ,A__=1_0 ,A__=[1_0, 2_0, 3_0, 4_0] ,A__=[1, 1, 2, 1] ,A__=True ,A__=True ,A__="relu" ,A__=3 ,A__=None ,):
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = num_channels
lowercase = embeddings_size
lowercase = hidden_sizes
lowercase = depths
lowercase = is_training
lowercase = use_labels
lowercase = hidden_act
lowercase = num_labels
lowercase = scope
lowercase = len(A__)
def A__ ( self):
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase = self.get_config()
return config, pixel_values
def A__ ( self):
return RegNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def A__ ( self ,A__ ,A__):
lowercase = FlaxRegNetModel(config=A__)
lowercase = model(A__)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) ,)
def A__ ( self ,A__ ,A__):
lowercase = self.num_labels
lowercase = FlaxRegNetForImageClassification(config=A__)
lowercase = model(A__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase = config_and_inputs
lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Union[str, Any] =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase_ : Optional[int] =False
lowercase_ : str =False
lowercase_ : Any =False
def A__ ( self):
lowercase = FlaxRegNetModelTester(self)
lowercase = ConfigTester(self ,config_class=A__ ,has_text_modality=A__)
def A__ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
return
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__)
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def A__ ( self):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def A__ ( self):
pass
def A__ ( self):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(A__)
lowercase = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,A__)
def A__ ( self):
def check_hidden_states_output(A__ ,A__ ,A__):
lowercase = model_class(A__)
lowercase = model(**self._prepare_for_class(A__ ,A__))
lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase = self.model_tester.num_stages
self.assertEqual(len(A__) ,expected_num_stages + 1)
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(A__ ,A__ ,A__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(A__ ,A__ ,A__)
def A__ ( self):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase = self._prepare_for_class(A__ ,A__)
lowercase = model_class(A__)
@jax.jit
def model_jitted(A__ ,**A__):
return model(pixel_values=A__ ,**A__)
with self.subTest('''JIT Enabled'''):
lowercase = model_jitted(**A__).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
lowercase = model_jitted(**A__).to_tuple()
self.assertEqual(len(A__) ,len(A__))
for jitted_output, output in zip(A__ ,A__):
self.assertEqual(jitted_output.shape ,output.shape)
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class lowercase ( unittest.TestCase ):
@cached_property
def A__ ( self):
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None
@slow
def A__ ( self):
lowercase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=A__ ,return_tensors='''np''')
lowercase = model(**A__)
# verify the logits
lowercase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape ,A__)
lowercase = jnp.array([-0.4180, -1.5051, -3.4836])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,A__ ,atol=1E-4))
| 711 |
def UpperCamelCase ( lowerCAmelCase__ = 1000 ):
'''simple docstring'''
lowercase = -1
lowercase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
lowercase = (n * n - 2 * a * n) // (2 * n - 2 * a)
lowercase = n - a - b
if c * c == (a * a + b * b):
lowercase = a * b * c
if candidate >= product:
lowercase = candidate
return product
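# Derivation behind `b = (n * n - 2 * a * n) // (2 * n - 2 * a)` above (comment
# added for clarity): a + b + c = n and a**2 + b**2 = c**2 give c = n - a - b, so
# a**2 + b**2 = (n - a - b)**2, which simplifies to b = (n**2 - 2*a*n) / (2*n - 2*a).
# For n = 1000 the maximising triplet is (200, 375, 425) with product 31875000.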
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
def A__ ( self):
lowercase = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''')
lowercase = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] ,dtype=tf.intaa ,) # J'aime le camembert !"
lowercase = model(A__)['''last_hidden_state''']
lowercase = tf.TensorShape((1, 1_0, 7_6_8))
self.assertEqual(output.shape ,A__)
# compare the actual values for a slice.
lowercase = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4))
| 712 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Tuple = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[str] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowercase__ :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
lowercase__ :int = {str(digit): digit**5 for digit in range(10)}
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCAmelCase__ ) )
def UpperCamelCase ( ):
'''simple docstring'''
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(lowerCAmelCase__ ) )
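# Added note (illustration only): 4150 is one of the numbers the sum above counts,
# since 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150.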
if __name__ == "__main__":
print(solution())
| 713 |
from numpy import exp, pi, sqrt
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 1.0 ):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
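# Added note (not part of the original sample): this evaluates the normal pdf
# f(x) = 1 / sqrt(2*pi*sigma**2) * exp(-(x - mu)**2 / (2*sigma**2)); at x == mu
# with sigma == 1 it returns 1 / sqrt(2*pi) ~ 0.3989.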
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ :int = logging.get_logger(__name__)
lowercase__ :Optional[int] = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[Any] ='''table-transformer'''
lowercase_ : Any =['''past_key_values''']
lowercase_ : Tuple ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self ,A__=True ,A__=None ,A__=3 ,A__=1_0_0 ,A__=6 ,A__=2_0_4_8 ,A__=8 ,A__=6 ,A__=2_0_4_8 ,A__=8 ,A__=0.0 ,A__=0.0 ,A__=True ,A__="relu" ,A__=2_5_6 ,A__=0.1 ,A__=0.0 ,A__=0.0 ,A__=0.02 ,A__=1.0 ,A__=False ,A__="sine" ,A__="resnet50" ,A__=True ,A__=False ,A__=1 ,A__=5 ,A__=2 ,A__=1 ,A__=1 ,A__=5 ,A__=2 ,A__=0.1 ,**A__ ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
lowercase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(A__ ,A__):
lowercase = backbone_config.get('''model_type''')
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(A__)
# set timm attributes to None
lowercase , lowercase , lowercase = None, None, None
lowercase = use_timm_backbone
lowercase = backbone_config
lowercase = num_channels
lowercase = num_queries
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = init_xavier_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = encoder_layers
lowercase = auxiliary_loss
lowercase = position_embedding_type
lowercase = backbone
lowercase = use_pretrained_backbone
lowercase = dilation
# Hungarian matcher
lowercase = class_cost
lowercase = bbox_cost
lowercase = giou_cost
# Loss coefficients
lowercase = mask_loss_coefficient
lowercase = dice_loss_coefficient
lowercase = bbox_loss_coefficient
lowercase = giou_loss_coefficient
lowercase = eos_coefficient
super().__init__(is_encoder_decoder=A__ ,**A__)
@property
def A__ ( self):
return self.encoder_attention_heads
@property
def A__ ( self):
return self.d_model
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : str =version.parse('''1.11''' )
@property
def A__ ( self):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def A__ ( self):
return 1E-5
@property
def A__ ( self):
return 1_2
| 714 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False ):
'''simple docstring'''
try:
lowercase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase = default
else:
# KEY is set, convert it to True or False.
try:
lowercase = strtobool(lowerCAmelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
lowercase__ :Dict = parse_flag_from_env("RUN_SLOW", default=False)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__=None , lowerCAmelCase__=None ):
'''simple docstring'''
if test_case is None:
return partial(lowerCAmelCase__ , version=lowerCAmelCase__ )
return unittest.skipUnless(is_torch_version('''>=''' , lowerCAmelCase__ ) , f'test requires torch version >= {version}' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
lowercase__ :Dict = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
class lowercase ( unittest.TestCase ):
lowercase_ : int =True
@classmethod
def A__ ( cls):
lowercase = tempfile.mkdtemp()
@classmethod
def A__ ( cls):
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def A__ ( self):
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('''**/*'''):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A__)
class lowercase ( unittest.TestCase ):
def A__ ( self):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowercase ( unittest.TestCase ):
def A__ ( self ,A__):
lowercase = mocks if isinstance(A__ ,(tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = AcceleratorState()
lowercase = tensor[None].clone().to(state.device )
lowercase = gather(lowerCAmelCase__ ).cpu()
lowercase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCAmelCase__ ):
return False
return True
class lowercase :
def __init__( self ,A__ ,A__ ,A__):
lowercase = returncode
lowercase = stdout
lowercase = stderr
async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
while True:
lowercase = await stream.readline()
if line:
callback(lowerCAmelCase__ )
else:
break
async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=False ):
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(lowerCAmelCase__ ) )
lowercase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCAmelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCAmelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase = []
lowercase = []
def tee(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="" ):
lowercase = line.decode('''utf-8''' ).rstrip()
sink.append(lowerCAmelCase__ )
if not quiet:
print(lowerCAmelCase__ , lowerCAmelCase__ , file=lowerCAmelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowerCAmelCase__ : tee(lowerCAmelCase__ , lowerCAmelCase__ , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowerCAmelCase__ : tee(lowerCAmelCase__ , lowerCAmelCase__ , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=lowerCAmelCase__ , )
return _RunOutput(await p.wait() , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=180 , lowerCAmelCase__=False , lowerCAmelCase__=True ):
'''simple docstring'''
lowercase = asyncio.get_event_loop()
lowercase = loop.run_until_complete(
_stream_subprocess(lowerCAmelCase__ , env=lowerCAmelCase__ , stdin=lowerCAmelCase__ , timeout=lowerCAmelCase__ , quiet=lowerCAmelCase__ , echo=lowerCAmelCase__ ) )
lowercase = ''' '''.join(lowerCAmelCase__ )
if result.returncode > 0:
lowercase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowercase ( SCREAMING_SNAKE_CASE__ ):
pass
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False ):
'''simple docstring'''
try:
lowercase = subprocess.check_output(lowerCAmelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCAmelCase__ , '''decode''' ):
lowercase = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(lowerCAmelCase__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 633 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Dict =(DEISMultistepScheduler,)
lowercase_ : Dict =(('''num_inference_steps''', 25),)
def A__ ( self ,**A__):
lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**A__)
return config
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
new_scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase = sample, sample
for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self):
pass
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
# copy over dummy past residuals
new_scheduler.set_timesteps(A__)
# copy over dummy past residual (must be after setting timesteps)
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=None ,**A__):
if scheduler is None:
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
return sample
def A__ ( self):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
scheduler.set_timesteps(A__)
elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
lowercase = scheduler.timesteps[5]
lowercase = scheduler.timesteps[6]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
self.assertEqual(output_a.shape ,sample.shape)
self.assertEqual(output_a.shape ,output_a.shape)
def A__ ( self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase = DEISMultistepScheduler(**self.get_scheduler_config())
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.23916) < 1E-3
lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
lowercase = DEISMultistepScheduler.from_config(scheduler.config)
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.23916) < 1E-3
def A__ ( self):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__)
def A__ ( self):
self.check_over_configs(thresholding=A__)
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,algorithm_type='''deis''' ,solver_order=A__ ,solver_type=A__ ,)
def A__ ( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__)
def A__ ( self):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,algorithm_type=A__ ,)
lowercase = self.full_loop(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,algorithm_type=A__ ,)
assert not torch.isnan(A__).any(), "Samples have nan numbers"
def A__ ( self):
self.check_over_configs(lower_order_final=A__)
self.check_over_configs(lower_order_final=A__)
def A__ ( self):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=A__ ,time_step=0)
def A__ ( self):
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.23916) < 1E-3
def A__ ( self):
lowercase = self.full_loop(prediction_type='''v_prediction''')
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.091) < 1E-3
def A__ ( self):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
assert sample.dtype == torch.floataa
| 715 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
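# Seq2Seq trainer whose evaluate/predict generate sequences first, then post-process and compute metrics outside the inner evaluation loop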
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,*A__ ,A__=None ,A__=None ,**A__):
super().__init__(*A__ ,**A__)
lowercase = eval_examples
lowercase = post_process_function
def A__ ( self ,A__ = None ,A__=None ,A__ = None ,A__ = "eval" ,**A__ ,):
lowercase = gen_kwargs.copy()
lowercase = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
)
lowercase = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
)
lowercase = gen_kwargs
lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase = self.get_eval_dataloader(A__)
lowercase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase = self.compute_metrics
lowercase = None
lowercase = time.time()
lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase = eval_loop(
A__ ,description='''Evaluation''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
finally:
lowercase = compute_metrics
lowercase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
lowercase = self.post_process_function(A__ ,A__ ,A__)
lowercase = self.compute_metrics(A__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
lowercase = metrics.pop(A__)
metrics.update(output.metrics)
else:
lowercase = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(A__)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
lowercase = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,A__)
return metrics
def A__ ( self ,A__ ,A__ ,A__=None ,A__ = "test" ,**A__):
lowercase = gen_kwargs.copy()
lowercase = self.get_test_dataloader(A__)
# Temporarily disable metric computation, we will do it in the loop here.
lowercase = self.compute_metrics
lowercase = None
lowercase = time.time()
lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase = eval_loop(
A__ ,description='''Prediction''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
finally:
lowercase = compute_metrics
lowercase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase = self.post_process_function(A__ ,A__ ,A__ ,'''predict''')
lowercase = self.compute_metrics(A__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
lowercase = metrics.pop(A__)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=A__)
| 633 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowercase__ :Union[str, Any] = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowercase__ :Optional[int] = dataset.iloc[:, 1:2].values
lowercase__ :List[Any] = dataset.iloc[:, 2].values
lowercase__ :Union[str, Any] = train_test_split(X, y, test_size=0.2, random_state=0)
lowercase__ :Dict = PolynomialFeatures(degree=4)
lowercase__ :List[str] = poly_reg.fit_transform(X)
lowercase__ :Any = LinearRegression()
pol_reg.fit(X_poly, y)
def UpperCamelCase ( ):
'''simple docstring'''
plt.scatter(lowerCAmelCase__ , lowerCAmelCase__ , color='''red''' )
plt.plot(lowerCAmelCase__ , pol_reg.predict(poly_reg.fit_transform(lowerCAmelCase__ ) ) , color='''blue''' )
plt.title('''Truth or Bluff (Linear Regression)''' )
plt.xlabel('''Position level''' )
plt.ylabel('''Salary''' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 716 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( lowerCAmelCase__=None ):
'''simple docstring'''
if subparsers is not None:
lowercase = subparsers.add_parser('''tpu-config''' , description=_description )
else:
lowercase = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
lowercase = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=lowerCAmelCase__ , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=lowerCAmelCase__ , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
lowercase = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=lowerCAmelCase__ , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase__ )
return parser
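# merge CLI arguments with the accelerate config file defaults, assemble the startup command, and send it to every TPU worker via `gcloud ... tpu-vm ssh`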
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(lowerCAmelCase__ ):
lowercase = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowercase = defaults.command_file
if not args.command and defaults.commands is not None:
lowercase = defaults.commands
if not args.tpu_name:
lowercase = defaults.tpu_name
if not args.tpu_zone:
lowercase = defaults.tpu_zone
if args.accelerate_version == "dev":
lowercase = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
lowercase = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , lowerCAmelCase__ ):
lowercase = f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
lowercase = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , lowerCAmelCase__ ):
lowercase = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowercase = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
lowercase = '''; '''.join(lowerCAmelCase__ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowercase = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(lowerCAmelCase__ )}' )
return
subprocess.run(lowerCAmelCase__ )
print('''Successfully setup pod.''' )
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = tpu_command_parser()
lowercase = parser.parse_args()
tpu_command_launcher(lowerCAmelCase__ )
| 633 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ :List[Any] = logging.get_logger(__name__)
lowercase__ :str = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Dict ='''deit'''
def __init__( self ,A__=7_6_8 ,A__=1_2 ,A__=1_2 ,A__=3_0_7_2 ,A__="gelu" ,A__=0.0 ,A__=0.0 ,A__=0.02 ,A__=1E-12 ,A__=2_2_4 ,A__=1_6 ,A__=3 ,A__=True ,A__=1_6 ,**A__ ,):
super().__init__(**A__)
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = qkv_bias
lowercase = encoder_stride
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Any =version.parse('''1.11''' )
@property
def A__ ( self):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def A__ ( self):
return 1E-4
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase__ :Optional[Any] = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase__ :List[str] = 10
lowercase__ :Tuple = 256
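# MinHash signature over a file's token set; files with fewer than MIN_NUM_TOKENS tokens are skipped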
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if len(lowerCAmelCase__ ) < MIN_NUM_TOKENS:
return None
lowercase = MinHash(num_perm=lowerCAmelCase__ )
for token in set(lowerCAmelCase__ ):
min_hash.update(token.encode() )
return min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return {t for t in NON_ALPHA.split(lowerCAmelCase__ ) if len(t.strip() ) > 0}
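# LSH-backed index of MinHash signatures: files whose signatures collide above the Jaccard threshold are grouped into duplicate clusters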
class lowercase :
def __init__( self ,*,
A__ = 0.85 ,):
lowercase = duplication_jaccard_threshold
lowercase = NUM_PERM
lowercase = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm)
lowercase = defaultdict(A__)
def A__ ( self ,A__ ,A__):
lowercase = self._index.query(A__)
if code_key in self._index.keys:
print(f'Duplicate key {code_key}')
return
self._index.insert(A__ ,A__)
if len(A__) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(A__)
break
else:
self._duplicate_clusters[close_duplicates[0]].add(A__)
def A__ ( self):
lowercase = []
for base, duplicates in self._duplicate_clusters.items():
lowercase = [base] + list(A__)
# reformat the cluster to be a list of dict
lowercase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(A__)
return duplicate_clusters
def A__ ( self ,A__):
lowercase = self.get_duplicate_clusters()
with open(A__ ,'''w''') as f:
json.dump(A__ ,A__)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = element
lowercase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCAmelCase__ , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase__ ) ) , max_queue_size=100 ) ):
di.add(lowerCAmelCase__ , lowerCAmelCase__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
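# token-set Jaccard similarity between two code files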
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = get_tokens(lowerCAmelCase__ )
lowercase = get_tokens(lowerCAmelCase__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
lowercase__ :List[Any] = None
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = []
for elementa in cluster:
lowercase = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
lowercase = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(lowerCAmelCase__ , lowerCAmelCase__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowercase = 1
extremes.append(lowerCAmelCase__ )
return extremes
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
global _shared_dataset
lowercase = dataset
lowercase = []
lowercase = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCAmelCase__ , lowerCAmelCase__ , ) , total=len(lowerCAmelCase__ ) , ):
extremes_list.append(lowerCAmelCase__ )
return extremes_list
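# end-to-end near-deduplication: build duplicate clusters, keep one set of "extreme" representatives per cluster, and filter the remaining duplicates out of the dataset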
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 0.85 ):
'''simple docstring'''
lowercase = make_duplicate_clusters(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
lowercase = {}
lowercase = find_extremes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for extremes in extremes_clusters:
for element in extremes:
lowercase = element
lowercase = duplicate_indices - set(extreme_dict.keys() )
lowercase = dataset.filter(lambda lowerCAmelCase__ , lowerCAmelCase__ : idx not in remove_indices , with_indices=lowerCAmelCase__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowercase = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
lowercase = extreme_dict[element['''base_index''']]['''copies''']
print(f'Original dataset size: {len(lowerCAmelCase__ )}' )
print(f'Number of duplicate clusters: {len(lowerCAmelCase__ )}' )
print(f'Files in duplicate cluster: {len(lowerCAmelCase__ )}' )
print(f'Unique files in duplicate cluster: {len(lowerCAmelCase__ )}' )
print(f'Filtered dataset size: {len(lowerCAmelCase__ )}' )
return ds_filter, duplicate_clusters
| 633 | 0 |
import unittest
from transformers import DonutProcessor
lowercase__ :Optional[int] = "naver-clova-ix/donut-base"
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = DonutProcessor.from_pretrained(A__)
def A__ ( self):
lowercase = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
lowercase = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
lowercase = self.processor.tokenajson(A__)
self.assertDictEqual(A__ ,A__)
| 718 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Union[str, Any] =CLIPConfig
lowercase_ : str =['''CLIPEncoderLayer''']
def __init__( self ,A__):
super().__init__(A__)
lowercase = CLIPVisionModelWithProjection(config.vision_config)
lowercase = nn.Linear(config.vision_config.projection_dim ,1)
lowercase = nn.Linear(config.vision_config.projection_dim ,1)
@torch.no_grad()
def A__ ( self ,A__ ,A__ ,A__=0.5 ,A__=0.5):
lowercase = self.vision_model(A__)[0]
lowercase = self.p_head(A__)
lowercase = nsfw_detected.flatten()
lowercase = nsfw_detected > p_threshold
lowercase = nsfw_detected.tolist()
if any(A__):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''')
for idx, nsfw_detected_ in enumerate(A__):
if nsfw_detected_:
lowercase = np.zeros(images[idx].shape)
lowercase = self.w_head(A__)
lowercase = watermark_detected.flatten()
lowercase = watermark_detected > w_threshold
lowercase = watermark_detected.tolist()
if any(A__):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''')
for idx, watermark_detected_ in enumerate(A__):
if watermark_detected_:
lowercase = np.zeros(images[idx].shape)
return images, nsfw_detected, watermark_detected
| 633 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowercase ( unittest.TestCase ):
def __init__( self ,A__ ,A__=1_3 ,A__=7 ,A__=True ,A__=True ,A__=True ,A__=True ,A__=9_9 ,A__=3_2 ,A__=5 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=1_6 ,A__=2 ,A__=0.02 ,A__=4 ,):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_attention_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_choices
def A__ ( self):
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
lowercase = None
if self.use_attention_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length])
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
lowercase = RoFormerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A__ ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Tuple =True
lowercase_ : Union[str, Any] =(
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A__ ( self):
lowercase = FlaxRoFormerModelTester(self)
@slow
def A__ ( self):
for model_class_name in self.all_model_classes:
lowercase = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' ,from_pt=A__)
lowercase = model(np.ones((1, 1)))
self.assertIsNotNone(A__)
@require_flax
class lowercase ( unittest.TestCase ):
@slow
def A__ ( self):
lowercase = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''')
lowercase = jnp.array([[0, 1, 2, 3, 4, 5]])
lowercase = model(A__)[0]
lowercase = 5_0_0_0_0
lowercase = (1, 6, vocab_size)
self.assertEqual(output.shape ,A__)
lowercase = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
self.assertTrue(jnp.allclose(output[:, :3, :3] ,A__ ,atol=1E-4))
| 719 |
class lowercase :
def __init__( self ,A__):
lowercase = val
lowercase = None
lowercase = None
def A__ ( self ,A__):
if self.val:
if val < self.val:
if self.left is None:
lowercase = Node(A__)
else:
self.left.insert(A__)
elif val > self.val:
if self.right is None:
lowercase = Node(A__)
else:
self.right.insert(A__)
else:
lowercase = val
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
# Recursive traversal
if root:
inorder(root.left , lowerCAmelCase__ )
res.append(root.val )
inorder(root.right , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
# Build BST
if len(lowerCAmelCase__ ) == 0:
return arr
lowercase = Node(arr[0] )
for i in range(1 , len(lowerCAmelCase__ ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowercase = []
inorder(lowerCAmelCase__ , lowerCAmelCase__ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 633 | 0 |
import os
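# sum of name scores: each name's alphabetical value (A=1 ... Z=26) multiplied by its 1-based position in the sorted list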
def UpperCamelCase ( ):
'''simple docstring'''
with open(os.path.dirname(lowerCAmelCase__ ) + '''/p022_names.txt''' ) as file:
lowercase = str(file.readlines()[0] )
lowercase = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase = 0
lowercase = 0
for i, name in enumerate(lowerCAmelCase__ ):
for letter in name:
name_score += ord(lowerCAmelCase__ ) - 64
total_score += (i + 1) * name_score
lowercase = 0
return total_score
if __name__ == "__main__":
print(solution())
| 720 |
import os
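# column-by-column dynamic programming: cheapest path from any cell in the first column to any cell in the last column, moving right, up, or down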
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
lowercase = [
[int(lowerCAmelCase__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase = len(lowerCAmelCase__ )
lowercase = len(matrix[0] )
lowercase = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
lowercase = matrix[i][0]
for j in range(1 , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
lowercase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase__ ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase__ :List[Any] = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
lowercase__ :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
from __future__ import annotations
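# side lengths can only form a polygon if the longest side is strictly shorter than the sum of all the others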
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if len(lowerCAmelCase__ ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Tuple = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[str] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowercase__ :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) == 0 )
def UpperCamelCase ( ):
'''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 633 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
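# same battery of tests as the DEIS suite, applied to the UniPC multistep scheduler (bh1/bh2 solver types)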
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[str] =(UniPCMultistepScheduler,)
lowercase_ : Tuple =(('''num_inference_steps''', 25),)
def A__ ( self ,**A__):
lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**A__)
return config
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
new_scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase = sample, sample
for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
# copy over dummy past residuals
new_scheduler.set_timesteps(A__)
# copy over dummy past residual (must be after setting timesteps)
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=None ,**A__):
if scheduler is None:
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
return sample
def A__ ( self):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
scheduler.set_timesteps(A__)
elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
lowercase = scheduler.timesteps[5]
lowercase = scheduler.timesteps[6]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
self.assertEqual(output_a.shape ,sample.shape)
self.assertEqual(output_a.shape ,output_a.shape)
def A__ ( self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
lowercase = DEISMultistepScheduler.from_config(scheduler.config)
lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
def A__ ( self):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__)
def A__ ( self):
self.check_over_configs(thresholding=A__)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)
def A__ ( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__)
def A__ ( self):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
lowercase = self.full_loop(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
assert not torch.isnan(A__).any(), "Samples have nan numbers"
def A__ ( self):
self.check_over_configs(lower_order_final=A__)
self.check_over_configs(lower_order_final=A__)
def A__ ( self):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=A__ ,time_step=0)
def A__ ( self):
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
def A__ ( self):
lowercase = self.full_loop(prediction_type='''v_prediction''')
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.1014) < 1E-3
def A__ ( self):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
assert sample.dtype == torch.floataa
def A__ ( self ,**A__):
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ :Tuple = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowercase__ :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase__ :Union[str, Any] = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :int = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
lowercase__ :List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
import logging
from transformers import PretrainedConfig
lowercase__ :int = logging.getLogger(__name__)
lowercase__ :Dict = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Optional[int] ='''bertabs'''
def __init__( self ,A__=3_0_5_2_2 ,A__=5_1_2 ,A__=6 ,A__=5_1_2 ,A__=8 ,A__=5_1_2 ,A__=0.2 ,A__=6 ,A__=7_6_8 ,A__=8 ,A__=2_0_4_8 ,A__=0.2 ,**A__ ,):
super().__init__(**A__)
lowercase = vocab_size
lowercase = max_pos
lowercase = enc_layers
lowercase = enc_hidden_size
lowercase = enc_heads
lowercase = enc_ff_size
lowercase = enc_dropout
lowercase = dec_layers
lowercase = dec_hidden_size
lowercase = dec_heads
lowercase = dec_ff_size
lowercase = dec_dropout
| 633 | 0 |
import colorsys
from PIL import Image # type: ignore
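# escape-time iteration z -> z**2 + c: returns the normalised step at which |z|**2 exceeds 4, or 1.0 if the point stays bounded for max_step iterations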
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase : Optional[Any] = x
lowercase : Tuple = y
for step in range(lowerCAmelCase__ ): # noqa: B007
lowercase : Optional[Any] = a * a - b * b + x
lowercase : Any = 2 * a * b + y
lowercase : Optional[int] = a_new
        # divergence happens for all complex numbers with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowerCAmelCase__ , 1 , 1 ) )
def UpperCamelCase ( lowerCAmelCase__ = 800 , lowerCAmelCase__ = 600 , lowerCAmelCase__ = -0.6 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 3.2 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = True , ):
'''simple docstring'''
lowercase : Optional[Any] = Image.new('''RGB''' , (image_width, image_height) )
lowercase : Tuple = img.load()
# loop through the image-coordinates
for image_x in range(lowerCAmelCase__ ):
for image_y in range(lowerCAmelCase__ ):
# determine the figure-coordinates based on the image-coordinates
lowercase : Union[str, Any] = figure_width / image_width * image_height
lowercase : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowercase : Tuple = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowercase : int = get_distance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowercase : List[str] = get_color_coded_rgb(lowerCAmelCase__ )
else:
lowercase : List[str] = get_black_and_white_rgb(lowerCAmelCase__ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowercase__ :Union[str, Any] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 703 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 633 | 0 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase__ :int = 16
lowercase__ :Optional[Any] = 32
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return int(x / 2**20 )
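# context manager that snapshots CUDA memory on enter/exit and records the delta and peak usage in MB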
class lowercase :
def __enter__( self):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase = torch.cuda.memory_allocated()
return self
def __exit__( self ,*A__):
gc.collect()
torch.cuda.empty_cache()
lowercase = torch.cuda.memory_allocated()
lowercase = torch.cuda.max_memory_allocated()
lowercase = bamb(self.end - self.begin)
lowercase = bamb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 16 , lowerCAmelCase__ = "bert-base-cased" , lowerCAmelCase__ = 320 , lowerCAmelCase__ = 160 , ):
'''simple docstring'''
lowercase = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
lowercase = load_dataset(
'''glue''' , '''mrpc''' , split={'''train''': f'train[:{n_train}]', '''validation''': f'validation[:{n_val}]'} )
def tokenize_function(lowerCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowercase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=lowerCAmelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowercase = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
lowercase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase = config['''lr''']
lowercase = int(config['''num_epochs'''] )
lowercase = int(config['''seed'''] )
lowercase = int(config['''batch_size'''] )
lowercase = args.model_name_or_path
set_seed(lowerCAmelCase__ )
lowercase , lowercase = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
# Instantiate optimizer
lowercase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase__ )
if accelerator.state.deepspeed_plugin is not None:
lowercase = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowercase = 1
lowercase = (len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase__ , )
else:
lowercase = DummyScheduler(lowerCAmelCase__ , total_num_steps=lowerCAmelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# We need to keep track of how many total steps we have iterated over
lowercase = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase = 0
# Now we train the model
lowercase = {}
for epoch in range(lowerCAmelCase__ , lowerCAmelCase__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
lowercase = model(**lowerCAmelCase__ )
lowercase = outputs.loss
lowercase = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=lowerCAmelCase__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowerCAmelCase__ , )
parser.add_argument(
'''--output_dir''' , type=lowerCAmelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--peak_memory_upper_bound''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
parser.add_argument(
'''--n_train''' , type=lowerCAmelCase__ , default=320 , help='''Number of training examples to use.''' , )
parser.add_argument(
'''--n_val''' , type=lowerCAmelCase__ , default=160 , help='''Number of validation examples to use.''' , )
parser.add_argument(
'''--num_epochs''' , type=lowerCAmelCase__ , default=1 , help='''Number of train epochs.''' , )
lowercase = parser.parse_args()
lowercase = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 704 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowercase__ :Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
'''simple docstring'''
if rng is None:
lowercase = random.Random()
lowercase = 1
for dim in shape:
total_dims *= dim
lowercase = []
for _ in range(lowerCAmelCase__ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
lowercase = np.array(lowerCAmelCase__ , dtype=jnp.intaa ).reshape(lowerCAmelCase__ )
return output
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ):
'''simple docstring'''
lowercase = ids_tensor(lowerCAmelCase__ , vocab_size=2 , rng=lowerCAmelCase__ )
# make sure that at least one token is attended to for each batch
lowercase = 1
return attn_mask
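# shared Flax generation tests: greedy, sampled and beam-search generation, jit-compiled generate, and cross-checking against the equivalent PyTorch model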
@require_flax
class lowercase :
lowercase_ : Any =None
lowercase_ : List[str] =()
def A__ ( self):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowercase = 2
lowercase = inputs['''input_ids'''].shape[-1] // 2
lowercase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
lowercase = jnp.ones_like(A__)
lowercase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowercase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowercase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 0
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase = getattr(A__ ,A__)
lowercase = pt_model_class(A__).eval()
lowercase = load_flax_weights_in_pytorch_model(A__ ,flax_model.params)
lowercase = flax_model.generate(A__).sequences
lowercase = pt_model.generate(torch.tensor(A__ ,dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = True
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 2
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = False
lowercase = max_length
lowercase = 2
lowercase = 2
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences)
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = True
lowercase = max_length
lowercase = 0.8
lowercase = 1_0
lowercase = 0.3
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = max_length
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
lowercase = max_length
lowercase = 2
lowercase = 1
lowercase = 8
lowercase = 9
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase = attention_mask.at[(0, 0)].set(0)
lowercase = False
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__ ,attention_mask=A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__ ,attention_mask=A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase = attention_mask.at[(0, 0)].set(0)
lowercase = True
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__ ,attention_mask=A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__ ,attention_mask=A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
def A__ ( self):
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase = attention_mask.at[(0, 0)].set(0)
lowercase = 2
lowercase = max_length
for model_class in self.all_generative_model_classes:
lowercase = model_class(A__)
lowercase = model.generate(A__ ,attention_mask=A__).sequences
self.assertEqual(generation_outputs.shape[-1] ,A__)
lowercase = jit(model.generate)
lowercase = jit_generate(A__ ,attention_mask=A__).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist())
@require_flax
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
lowercase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
lowercase = '''Hello world'''
lowercase = tokenizer(A__ ,return_tensors='''np''').input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(A__ ,'''do_samples'''):
model.generate(A__ ,do_samples=A__)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(A__ ,'''foo'''):
lowercase = {'''foo''': '''bar'''}
model.generate(A__ ,**A__)
| 633 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Optional[int] =['''vqvae''']
def __init__( self ,A__ ,A__ ,A__ ,A__ ,):
super().__init__()
self.register_modules(unet=A__ ,scheduler=A__ ,mel=A__ ,vqvae=A__)
def A__ ( self):
return 5_0 if isinstance(self.scheduler ,A__) else 1_0_0_0
@torch.no_grad()
def __call__( self ,A__ = 1 ,A__ = None ,A__ = None ,A__ = 0 ,A__ = 0 ,A__ = None ,A__ = None ,A__ = 0 ,A__ = 0 ,A__ = None ,A__ = 0 ,A__ = None ,A__ = None ,A__=True ,):
lowercase = steps or self.get_default_steps()
self.scheduler.set_timesteps(A__)
lowercase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowercase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=A__ ,device=self.device ,)
lowercase = noise
lowercase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A__ ,A__)
lowercase = self.mel.audio_slice_to_image(A__)
lowercase = np.frombuffer(input_image.tobytes() ,dtype='''uint8''').reshape(
(input_image.height, input_image.width))
lowercase = (input_image / 2_5_5) * 2 - 1
lowercase = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float).to(self.device)
if self.vqvae is not None:
lowercase = self.vqvae.encode(torch.unsqueeze(A__ ,0)).latent_dist.sample(
generator=A__)[0]
lowercase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowercase = self.scheduler.add_noise(A__ ,A__ ,self.scheduler.timesteps[start_step - 1])
lowercase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowercase = int(mask_start_secs * pixels_per_second)
lowercase = int(mask_end_secs * pixels_per_second)
lowercase = self.scheduler.add_noise(A__ ,A__ ,torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet ,A__):
lowercase = self.unet(A__ ,A__ ,A__)['''sample''']
else:
lowercase = self.unet(A__ ,A__)['''sample''']
if isinstance(self.scheduler ,A__):
lowercase = self.scheduler.step(
model_output=A__ ,timestep=A__ ,sample=A__ ,eta=A__ ,generator=A__ ,)['''prev_sample''']
else:
lowercase = self.scheduler.step(
model_output=A__ ,timestep=A__ ,sample=A__ ,generator=A__ ,)['''prev_sample''']
if mask is not None:
if mask_start > 0:
lowercase = mask[:, step, :, :mask_start]
if mask_end > 0:
lowercase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowercase = 1 / self.vqvae.config.scaling_factor * images
lowercase = self.vqvae.decode(A__)['''sample''']
lowercase = (images / 2 + 0.5).clamp(0 ,1)
lowercase = images.cpu().permute(0 ,2 ,3 ,1).numpy()
lowercase = (images * 2_5_5).round().astype('''uint8''')
lowercase = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(A__ ,mode='''RGB''').convert('''L''') for _ in images))
lowercase = [self.mel.image_to_audio(A__) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A__)[:, np.newaxis, :]) ,**ImagePipelineOutput(A__))
@torch.no_grad()
def A__ ( self ,A__ ,A__ = 5_0):
assert isinstance(self.scheduler ,A__)
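# Reverse DDIM walk: step the images back toward the initial noise latent so they can later be re-generated or interpolated.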
self.scheduler.set_timesteps(A__)
lowercase = np.array(
[np.frombuffer(image.tobytes() ,dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
lowercase = (sample / 2_5_5) * 2 - 1
lowercase = torch.Tensor(A__).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,))):
lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowercase = self.scheduler.alphas_cumprod[t]
lowercase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowercase = 1 - alpha_prod_t
lowercase = self.unet(A__ ,A__)['''sample''']
lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def A__ ( A__ ,A__ ,A__):
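# Spherical linear interpolation (slerp) between two flattened tensors, weighted by the interpolation factor alpha.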
lowercase = acos(torch.dot(torch.flatten(A__) ,torch.flatten(A__)) / torch.norm(A__) / torch.norm(A__))
return sin((1 - alpha) * theta) * xa / sin(A__) + sin(alpha * theta) * xa / sin(A__)
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[str] =(UniPCMultistepScheduler,)
lowercase_ : Tuple =(('''num_inference_steps''', 25),)
def A__ ( self ,**A__):
lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**A__)
return config
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
new_scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase = sample, sample
for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
# copy over dummy past residuals
new_scheduler.set_timesteps(A__)
# copy over dummy past residual (must be after setting timesteps)
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=None ,**A__):
if scheduler is None:
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
return sample
def A__ ( self):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
scheduler.set_timesteps(A__)
elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
lowercase = scheduler.timesteps[5]
lowercase = scheduler.timesteps[6]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
self.assertEqual(output_a.shape ,sample.shape)
self.assertEqual(output_a.shape ,output_a.shape)
def A__ ( self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
lowercase = DEISMultistepScheduler.from_config(scheduler.config)
lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
def A__ ( self):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__)
def A__ ( self):
self.check_over_configs(thresholding=A__)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)
def A__ ( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__)
def A__ ( self):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
lowercase = self.full_loop(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
assert not torch.isnan(A__).any(), "Samples have nan numbers"
def A__ ( self):
self.check_over_configs(lower_order_final=A__)
self.check_over_configs(lower_order_final=A__)
def A__ ( self):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=A__ ,time_step=0)
def A__ ( self):
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
def A__ ( self):
lowercase = self.full_loop(prediction_type='''v_prediction''')
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.1014) < 1E-3
def A__ ( self):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
assert sample.dtype == torch.floataa
def A__ ( self ,**A__):
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowercase__ :str = None
try:
import msvcrt
except ImportError:
lowercase__ :Tuple = None
try:
import fcntl
except ImportError:
lowercase__ :Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowercase__ :List[Any] = OSError
# Data
# ------------------------------------------------
lowercase__ :List[str] = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
lowercase__ :Tuple = "3.0.12"
lowercase__ :Tuple = None
def UpperCamelCase ( ):
'''simple docstring'''
global _logger
lowercase = _logger or logging.getLogger(__name__ )
return _logger
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__):
lowercase = lock_file
return None
def __str__( self):
lowercase = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class lowercase :
def __init__( self ,A__):
lowercase = lock
return None
def __enter__( self):
return self.lock
def __exit__( self ,A__ ,A__ ,A__):
self.lock.release()
return None
class lowercase :
def __init__( self ,A__ ,A__=-1 ,A__=None):
lowercase = max_filename_length if max_filename_length is not None else 2_5_5
# Hash the filename if it's too long
lowercase = self.hash_filename_if_too_long(A__ ,A__)
# The path to the lock file.
lowercase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowercase = None
# The default timeout value.
lowercase = timeout
# We use this lock primarily for the lock counter.
lowercase = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowercase = 0
return None
@property
def A__ ( self):
return self._lock_file
@property
def A__ ( self):
return self._timeout
@timeout.setter
def A__ ( self ,A__):
lowercase = float(A__)
return None
def A__ ( self):
raise NotImplementedError()
def A__ ( self):
raise NotImplementedError()
@property
def A__ ( self):
return self._lock_file_fd is not None
def A__ ( self ,A__=None ,A__=0.05):
# Use the default timeout, if no timeout is provided.
if timeout is None:
lowercase = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowercase = id(self)
lowercase = self._lock_file
lowercase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}')
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}')
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}')
raise Timeout(self._lock_file)
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...')
time.sleep(A__)
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowercase = max(0 ,self._lock_counter - 1)
raise
return _Acquire_ReturnProxy(lock=self)
def A__ ( self ,A__=False):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowercase = id(self)
lowercase = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
self._release()
lowercase = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}')
return None
def __enter__( self):
self.acquire()
return self
def __exit__( self ,A__ ,A__ ,A__):
self.release()
return None
def __del__( self):
self.release(force=A__)
return None
def A__ ( self ,A__ ,A__):
lowercase = os.path.basename(A__)
if len(A__) > max_length and max_length > 0:
lowercase = os.path.dirname(A__)
lowercase = str(hash(A__))
lowercase = filename[: max_length - len(A__) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(A__ ,A__)
else:
return path
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__ ,A__=-1 ,A__=None):
from .file_utils import relative_to_absolute_path
super().__init__(A__ ,timeout=A__ ,max_filename_length=A__)
lowercase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file)
def A__ ( self):
lowercase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
lowercase = os.open(self._lock_file ,A__)
except OSError:
pass
else:
try:
msvcrt.locking(A__ ,msvcrt.LK_NBLCK ,1)
except OSError:
os.close(A__)
else:
lowercase = fd
return None
def A__ ( self):
lowercase = self._lock_file_fd
lowercase = None
msvcrt.locking(A__ ,msvcrt.LK_UNLCK ,1)
os.close(A__)
try:
os.remove(self._lock_file)
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__ ,A__=-1 ,A__=None):
lowercase = os.statvfs(os.path.dirname(A__)).f_namemax
super().__init__(A__ ,timeout=A__ ,max_filename_length=A__)
def A__ ( self):
lowercase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
lowercase = os.open(self._lock_file ,A__)
try:
fcntl.flock(A__ ,fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
os.close(A__)
else:
lowercase = fd
return None
def A__ ( self):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
lowercase = self._lock_file_fd
lowercase = None
fcntl.flock(A__ ,fcntl.LOCK_UN)
os.close(A__)
return None
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def A__ ( self):
lowercase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
lowercase = os.open(self._lock_file ,A__)
except OSError:
pass
else:
lowercase = fd
return None
def A__ ( self):
os.close(self._lock_file_fd)
lowercase = None
try:
os.remove(self._lock_file)
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowercase__ :Optional[Any] = None
if msvcrt:
lowercase__ :Tuple = WindowsFileLock
elif fcntl:
lowercase__ :int = UnixFileLock
else:
lowercase__ :int = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 706 |
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = defaultdict(lowerCAmelCase__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase = [key for key, value in counts.items() if value > 1]
lowercase = []
for duplicate_key in duplicates:
lowercase = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(lowerCAmelCase__ ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : s["title"].lower() )
def UpperCamelCase ( lowerCAmelCase__=False ):
'''simple docstring'''
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
lowercase = yaml.safe_load(f.read() )
# Get to the API doc
lowercase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase = content[api_idx]['''sections''']
# Then to the model doc
lowercase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowercase = api_doc[model_idx]['''sections''']
lowercase = [(idx, section) for idx, section in enumerate(lowerCAmelCase__ ) if '''sections''' in section]
lowercase = False
for idx, modality_doc in modalities_docs:
lowercase = modality_doc['''sections''']
lowercase = clean_model_doc_toc(lowerCAmelCase__ )
if old_modality_doc != new_modality_doc:
lowercase = True
if overwrite:
lowercase = new_modality_doc
if diff:
if overwrite:
lowercase = model_doc
lowercase = api_doc
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
lowercase__ :Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowercase__ :int = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 633 | 0 |
from __future__ import annotations
from math import pi
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
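# Inductive reactance relation: X_L = 2 * pi * f * L. Exactly one argument must be 0; it is solved for from the other two.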
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 10**-10 ):
'''simple docstring'''
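# Newton-Raphson iteration: x_{n+1} = x_n - f(x_n) / f'(x_n); the derivative is obtained symbolically via sympy.diff and iteration stops once |f(x)| < precision.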
lowercase = a
while True:
lowercase = Decimal(lowerCAmelCase__ ) - (
Decimal(eval(lowerCAmelCase__ ) ) / Decimal(eval(str(diff(lowerCAmelCase__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(lowerCAmelCase__ ) ) < precision: # noqa: S307
return float(lowerCAmelCase__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find root of logarithmic function (the root of log(x) - 1 = 0 is e)
print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 633 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :List[Any] = logging.get_logger(__name__)
lowercase__ :Optional[int] = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Tuple ='''dpr'''
def __init__( self ,A__=3_0_5_2_2 ,A__=7_6_8 ,A__=1_2 ,A__=1_2 ,A__=3_0_7_2 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=2 ,A__=0.02 ,A__=1E-12 ,A__=0 ,A__="absolute" ,A__ = 0 ,**A__ ,):
super().__init__(pad_token_id=A__ ,**A__)
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = projection_dim
lowercase = position_embedding_type
| 708 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase , lowercase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return (gray > 127) & (gray <= 255)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
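# Morphological dilation: an output pixel is set whenever the structuring element (kernel) overlaps at least one set pixel of the zero-padded input.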
lowercase = np.zeros_like(lowerCAmelCase__ )
lowercase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
lowercase = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
lowercase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
lowercase = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
lowercase__ :str = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
lowercase__ :List[str] = np.array(Image.open(lena_path))
# kernel to be applied
lowercase__ :Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
lowercase__ :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
lowercase__ :str = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
| 633 | 0 |
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
# array bounds provided by analysis
lowercase = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
lowercase = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
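# Witness bases: checking only these primes is deterministic for any n below the matching bound in the list above.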
for idx, _p in enumerate(lowerCAmelCase__ , 1 ):
if n < _p:
# then we have our last prime to check
lowercase = primes[:idx]
break
lowercase , lowercase = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowercase = False
for r in range(lowerCAmelCase__ ):
lowercase = pow(lowerCAmelCase__ , d * 2**r , lowerCAmelCase__ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowercase = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCamelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 709 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
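# Build a linear layer whose weights are tied to the shared token embedding; it serves as the LM head (output projection).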
lowercase , lowercase = emb.weight.shape
lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
lowercase = emb.weight.data
return lin_layer
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
lowercase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
lowercase = mam_aaa['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase = MaMaaaConfig(
vocab_size=lowerCAmelCase__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
lowercase = state_dict['''decoder.embed_tokens.weight''']
lowercase = MaMaaaForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowercase__ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowercase__ :Tuple = parser.parse_args()
lowercase__ :int = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False ):
'''simple docstring'''
try:
lowercase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase = default
else:
# KEY is set, convert it to True or False.
try:
lowercase = strtobool(lowerCAmelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
lowercase__ :Dict = parse_flag_from_env("""RUN_SLOW""", default=False)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__=None , lowerCAmelCase__=None ):
'''simple docstring'''
if test_case is None:
return partial(lowerCAmelCase__ , version=lowerCAmelCase__ )
return unittest.skipUnless(is_torch_version('''>=''' , lowerCAmelCase__ ) , f'test requires torch version >= {version}' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
lowercase__ :Dict = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
class lowercase ( unittest.TestCase ):
lowercase_ : int =True
@classmethod
def A__ ( cls):
lowercase = tempfile.mkdtemp()
@classmethod
def A__ ( cls):
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def A__ ( self):
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('''**/*'''):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A__)
class lowercase ( unittest.TestCase ):
def A__ ( self):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowercase ( unittest.TestCase ):
def A__ ( self ,A__):
lowercase = mocks if isinstance(A__ ,(tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = AcceleratorState()
lowercase = tensor[None].clone().to(state.device )
lowercase = gather(lowerCAmelCase__ ).cpu()
lowercase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCAmelCase__ ):
return False
return True
class lowercase :
def __init__( self ,A__ ,A__ ,A__):
lowercase = returncode
lowercase = stdout
lowercase = stderr
async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
while True:
lowercase = await stream.readline()
if line:
callback(lowerCAmelCase__ )
else:
break
async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=False ):
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(lowerCAmelCase__ ) )
lowercase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCAmelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCAmelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase = []
lowercase = []
def tee(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="" ):
lowercase = line.decode('''utf-8''' ).rstrip()
sink.append(lowerCAmelCase__ )
if not quiet:
print(lowerCAmelCase__ , lowerCAmelCase__ , file=lowerCAmelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowerCAmelCase__ : tee(lowerCAmelCase__ , lowerCAmelCase__ , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowerCAmelCase__ : tee(lowerCAmelCase__ , lowerCAmelCase__ , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=lowerCAmelCase__ , )
return _RunOutput(await p.wait() , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=180 , lowerCAmelCase__=False , lowerCAmelCase__=True ):
'''simple docstring'''
lowercase = asyncio.get_event_loop()
lowercase = loop.run_until_complete(
_stream_subprocess(lowerCAmelCase__ , env=lowerCAmelCase__ , stdin=lowerCAmelCase__ , timeout=lowerCAmelCase__ , quiet=lowerCAmelCase__ , echo=lowerCAmelCase__ ) )
lowercase = ''' '''.join(lowerCAmelCase__ )
if result.returncode > 0:
lowercase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowercase ( SCREAMING_SNAKE_CASE__ ):
pass
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False ):
'''simple docstring'''
try:
lowercase = subprocess.check_output(lowerCAmelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCAmelCase__ , '''decode''' ):
lowercase = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(lowerCAmelCase__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 710 |
from __future__ import annotations
from random import random
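# Treap: a randomized binary search tree ordered by value and heap-ordered by a random priority; split and merge are the core primitives.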
class lowercase :
def __init__( self ,A__ = None):
lowercase = value
lowercase = random()
lowercase = None
lowercase = None
def __repr__( self):
from pprint import pformat
if self.left is None and self.right is None:
return f'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1)
def __str__( self):
lowercase = str(self.value) + ''' '''
lowercase = str(self.left or '''''')
lowercase = str(self.right or '''''')
return value + left + right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
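# Split the treap into two treaps: one holding values <= value, the other holding values > value.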
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowercase , lowercase = split(root.left , lowerCAmelCase__ )
return left, root
else:
lowercase , lowercase = split(root.right , lowerCAmelCase__ )
return root, right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
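# Merge two treaps assuming every value in the left treap is <= every value in the right one; the node with the smaller priority becomes the new root.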
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowercase = merge(left.right , lowerCAmelCase__ )
return left
else:
lowercase = merge(lowerCAmelCase__ , right.left )
return right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = Node(lowerCAmelCase__ )
lowercase , lowercase = split(lowerCAmelCase__ , lowerCAmelCase__ )
return merge(merge(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = split(lowerCAmelCase__ , value - 1 )
lowercase , lowercase = split(lowerCAmelCase__ , lowerCAmelCase__ )
return merge(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
lowercase = insert(lowerCAmelCase__ , int(arg[1:] ) )
elif arg[0] == "-":
lowercase = erase(lowerCAmelCase__ , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = None
print(
'''enter numbers to create a tree, + value to add value into treap, '''
'''- value to erase all nodes with value. \'q\' to quit. ''' )
lowercase = input()
while args != "q":
lowercase = interact_treap(lowerCAmelCase__ , lowerCAmelCase__ )
print(lowerCAmelCase__ )
lowercase = input()
print('''goodbye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 633 | 0 |
import os
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
lowercase = [
[int(lowerCAmelCase__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase = len(lowerCAmelCase__ )
lowercase = len(matrix[0] )
lowercase = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
lowercase = matrix[i][0]
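# Column-by-column dynamic programming: each cell is first reached from the left, then relaxed with downward and upward moves within the column.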
for j in range(1 , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
lowercase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase__ ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
| 711 |
def UpperCamelCase ( lowerCAmelCase__ = 1000 ):
'''simple docstring'''
lowercase = -1
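# Search for a Pythagorean triplet (a, b, c) with a + b + c == n and return the largest product a * b * c.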
lowercase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
lowercase = (n * n - 2 * a * n) // (2 * n - 2 * a)
lowercase = n - a - b
if c * c == (a * a + b * b):
lowercase = a * b * c
if candidate >= product:
lowercase = candidate
return product
if __name__ == "__main__":
print(F'{solution() = }')
| 633 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ :Any = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :int = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :str = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
lowercase__ :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Tuple = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[str] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowercase__ :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowercase ( unittest.TestCase ):
@slow
def A__ ( self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModel.from_pretrained(A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModel.from_pretrained(A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForPreTraining.from_pretrained(A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForPreTraining.from_pretrained(A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForCausalLM.from_pretrained(A__ ,from_pt=A__)
lowercase , lowercase = TFAutoModelForCausalLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForCausalLM.from_pretrained(A__ ,from_tf=A__)
lowercase , lowercase = AutoModelForCausalLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelWithLMHead.from_pretrained(A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelWithLMHead.from_pretrained(A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForMaskedLM.from_pretrained(A__ ,from_pt=A__)
lowercase , lowercase = TFAutoModelForMaskedLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForMaskedLM.from_pretrained(A__ ,from_tf=A__)
lowercase , lowercase = AutoModelForMaskedLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(A__ ,from_pt=A__)
lowercase , lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ,from_tf=A__)
lowercase , lowercase = AutoModelForSeqaSeqLM.from_pretrained(
A__ ,output_loading_info=A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForSequenceClassification.from_pretrained(A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForSequenceClassification.from_pretrained(A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
@slow
def A__ ( self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = TFAutoModelForQuestionAnswering.from_pretrained(A__ ,from_pt=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
lowercase = AutoModelForQuestionAnswering.from_pretrained(A__ ,from_tf=A__)
self.assertIsNotNone(A__)
self.assertIsInstance(A__ ,A__)
def A__ ( self):
lowercase = TFAutoModelWithLMHead.from_pretrained(A__ ,from_pt=A__)
self.assertIsInstance(A__ ,A__)
self.assertEqual(model.num_parameters() ,1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=A__) ,1_4_4_1_0)
lowercase = AutoModelWithLMHead.from_pretrained(A__ ,from_tf=A__)
self.assertIsInstance(A__ ,A__)
self.assertEqual(model.num_parameters() ,1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=A__) ,1_4_4_1_0)
def A__ ( self):
lowercase = TFAutoModelWithLMHead.from_pretrained(A__ ,from_pt=A__)
self.assertIsInstance(A__ ,A__)
self.assertEqual(model.num_parameters() ,1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=A__) ,1_4_4_1_0)
lowercase = AutoModelWithLMHead.from_pretrained(A__ ,from_tf=A__)
self.assertIsInstance(A__ ,A__)
self.assertEqual(model.num_parameters() ,1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=A__) ,1_4_4_1_0)
| 713 |
from numpy import exp, pi, sqrt
def UpperCamelCase ( x , mu = 0.0 , sigma = 1.0 ):
    '''simple docstring'''
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
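    # Illustrative check (added note, not in the original file; assumes the corrected
    # signature above): the density peaks at x == mu, where its value is
    # 1 / sqrt(2 * pi) ≈ 0.3989422804014327.
    # >>> round(UpperCamelCase(0.0), 10)
    # 0.3989422804
    # >>> UpperCamelCase(2.0, mu=2.0) == UpperCamelCase(0.0)
    # True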
| 633 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__( self ,id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {} # {vertex:distance}
    def __lt__( self ,other):
        return self.key < other.key
    def __repr__( self):
        return self.id
    def add_neighbor( self ,vertex):
        self.neighbors.append(vertex)
    def add_edge( self ,vertex ,weight):
        self.edges[vertex.id] = weight
def connect( graph , a , b , edge ):
    '''simple docstring'''
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prim( graph , root ):
    '''simple docstring'''
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap( graph , root ):
    '''simple docstring'''
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCamelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
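    # Hedged usage sketch (added; relies on the reconstructed names Vertex, connect
    # and prim above, so treat it as illustrative rather than canonical):
    # >>> graph = [Vertex(n) for n in range(5)]
    # >>> connect(graph, 1, 2, 15); connect(graph, 1, 3, 12)
    # >>> connect(graph, 2, 4, 13); connect(graph, 2, 5, 5)
    # >>> connect(graph, 3, 2, 6);  connect(graph, 3, 4, 6)
    # >>> prim(graph, graph[0])
    # [(2, 3), (3, 1), (4, 3), (5, 2)]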
| 714 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False ):
'''simple docstring'''
try:
lowercase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase = default
else:
# KEY is set, convert it to True or False.
try:
lowercase = strtobool(lowerCAmelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
lowercase__ :Dict = parse_flag_from_env("RUN_SLOW", default=False)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__=None , lowerCAmelCase__=None ):
'''simple docstring'''
if test_case is None:
return partial(lowerCAmelCase__ , version=lowerCAmelCase__ )
return unittest.skipUnless(is_torch_version('''>=''' , lowerCAmelCase__ ) , f'test requires torch version >= {version}' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
lowercase__ :Dict = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
class lowercase ( unittest.TestCase ):
lowercase_ : int =True
@classmethod
def A__ ( cls):
lowercase = tempfile.mkdtemp()
@classmethod
def A__ ( cls):
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def A__ ( self):
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('''**/*'''):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A__)
class lowercase ( unittest.TestCase ):
def A__ ( self):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowercase ( unittest.TestCase ):
def A__ ( self ,A__):
lowercase = mocks if isinstance(A__ ,(tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = AcceleratorState()
lowercase = tensor[None].clone().to(state.device )
lowercase = gather(lowerCAmelCase__ ).cpu()
lowercase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCAmelCase__ ):
return False
return True
class lowercase :
def __init__( self ,A__ ,A__ ,A__):
lowercase = returncode
lowercase = stdout
lowercase = stderr
async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
while True:
lowercase = await stream.readline()
if line:
callback(lowerCAmelCase__ )
else:
break
async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=False ):
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(lowerCAmelCase__ ) )
lowercase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCAmelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCAmelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase = []
lowercase = []
def tee(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="" ):
lowercase = line.decode('''utf-8''' ).rstrip()
sink.append(lowerCAmelCase__ )
if not quiet:
print(lowerCAmelCase__ , lowerCAmelCase__ , file=lowerCAmelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowerCAmelCase__ : tee(lowerCAmelCase__ , lowerCAmelCase__ , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowerCAmelCase__ : tee(lowerCAmelCase__ , lowerCAmelCase__ , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=lowerCAmelCase__ , )
return _RunOutput(await p.wait() , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=180 , lowerCAmelCase__=False , lowerCAmelCase__=True ):
'''simple docstring'''
lowercase = asyncio.get_event_loop()
lowercase = loop.run_until_complete(
_stream_subprocess(lowerCAmelCase__ , env=lowerCAmelCase__ , stdin=lowerCAmelCase__ , timeout=lowerCAmelCase__ , quiet=lowerCAmelCase__ , echo=lowerCAmelCase__ ) )
lowercase = ''' '''.join(lowerCAmelCase__ )
if result.returncode > 0:
lowercase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowercase ( SCREAMING_SNAKE_CASE__ ):
pass
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False ):
'''simple docstring'''
try:
lowercase = subprocess.check_output(lowerCAmelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCAmelCase__ , '''decode''' ):
lowercase = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(lowerCAmelCase__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 633 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = {}
lowercase = job['''started_at''']
lowercase = job['''completed_at''']
lowercase = date_parser.parse(lowerCAmelCase__ )
lowercase = date_parser.parse(lowerCAmelCase__ )
lowercase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
lowercase = start
lowercase = end
lowercase = duration_in_min
return job_info
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ):
'''simple docstring'''
lowercase = None
if token is not None:
lowercase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}
lowercase = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
lowercase = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json()
lowercase = {}
try:
job_time.update({job['''name''']: extract_time_from_single_job(lowerCAmelCase__ ) for job in result['''jobs''']} )
lowercase = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(lowerCAmelCase__ ):
lowercase = requests.get(url + f'&page={i + 2}' , headers=lowerCAmelCase__ ).json()
job_time.update({job['''name''']: extract_time_from_single_job(lowerCAmelCase__ ) for job in result['''jobs''']} )
return job_time
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
if __name__ == "__main__":
lowercase__ :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
lowercase__ :Optional[int] = parser.parse_args()
lowercase__ :List[str] = get_job_time(args.workflow_run_id)
lowercase__ :Any = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'{k}: {v["duration"]}')
| 715 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,*A__ ,A__=None ,A__=None ,**A__):
super().__init__(*A__ ,**A__)
lowercase = eval_examples
lowercase = post_process_function
def A__ ( self ,A__ = None ,A__=None ,A__ = None ,A__ = "eval" ,**A__ ,):
lowercase = gen_kwargs.copy()
lowercase = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
)
lowercase = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
)
lowercase = gen_kwargs
lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase = self.get_eval_dataloader(A__)
lowercase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase = self.compute_metrics
lowercase = None
lowercase = time.time()
lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase = eval_loop(
A__ ,description='''Evaluation''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
finally:
lowercase = compute_metrics
lowercase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase = self.post_process_function(A__ ,A__ ,A__)
lowercase = self.compute_metrics(A__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
lowercase = metrics.pop(A__)
metrics.update(output.metrics)
else:
lowercase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(A__)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
lowercase = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,A__)
return metrics
def A__ ( self ,A__ ,A__ ,A__=None ,A__ = "test" ,**A__):
lowercase = gen_kwargs.copy()
lowercase = self.get_test_dataloader(A__)
# Temporarily disable metric computation, we will do it in the loop here.
lowercase = self.compute_metrics
lowercase = None
lowercase = time.time()
lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase = eval_loop(
A__ ,description='''Prediction''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
finally:
lowercase = compute_metrics
lowercase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase = self.post_process_function(A__ ,A__ ,A__ ,'''predict''')
lowercase = self.compute_metrics(A__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
lowercase = metrics.pop(A__)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=A__)
| 633 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowercase__ :str = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,**A__):
requires_backends(self ,['''bs4'''])
super().__init__(**A__)
def A__ ( self ,A__):
lowercase = []
lowercase = []
lowercase = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
lowercase = parent.find_all(child.name ,recursive=A__)
xpath_tags.append(child.name)
xpath_subscripts.append(
0 if 1 == len(A__) else next(i for i, s in enumerate(A__ ,1) if s is child))
lowercase = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def A__ ( self ,A__):
lowercase = BeautifulSoup(A__ ,'''html.parser''')
lowercase = []
lowercase = []
lowercase = []
for element in html_code.descendants:
if type(A__) == bsa.element.NavigableString:
if type(element.parent) != bsa.element.Tag:
continue
lowercase = html.unescape(A__).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(A__)
lowercase , lowercase = self.xpath_soup(A__)
stringaxtag_seq.append(A__)
stringaxsubs_seq.append(A__)
if len(A__) != len(A__):
raise ValueError('''Number of doc strings and xtags does not correspond''')
if len(A__) != len(A__):
raise ValueError('''Number of doc strings and xsubs does not correspond''')
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def A__ ( self ,A__ ,A__):
lowercase = ''''''
for tagname, subs in zip(A__ ,A__):
xpath += f'/{tagname}'
if subs != 0:
xpath += f'[{subs}]'
return xpath
def __call__( self ,A__):
lowercase = False
# Check that strings has a valid type
if isinstance(A__ ,A__):
lowercase = True
elif isinstance(A__ ,(list, tuple)):
if len(A__) == 0 or isinstance(html_strings[0] ,A__):
lowercase = True
if not valid_strings:
raise ValueError(
'''HTML strings must of type `str`, `List[str]` (batch of examples), '''
f'but is of type {type(A__)}.')
lowercase = bool(isinstance(A__ ,(list, tuple)) and (isinstance(html_strings[0] ,A__)))
if not is_batched:
lowercase = [html_strings]
# Get nodes + xpaths
lowercase = []
lowercase = []
for html_string in html_strings:
lowercase , lowercase , lowercase = self.get_three_from_single(A__)
nodes.append(A__)
lowercase = []
for node, tag_list, sub_list in zip(A__ ,A__ ,A__):
lowercase = self.construct_xpath(A__ ,A__)
xpath_strings.append(A__)
xpaths.append(A__)
# return as Dict
lowercase = {'''nodes''': nodes, '''xpaths''': xpaths}
lowercase = BatchFeature(data=A__ ,tensor_type=A__)
return encoded_inputs
| 716 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( lowerCAmelCase__=None ):
'''simple docstring'''
if subparsers is not None:
lowercase = subparsers.add_parser('''tpu-config''' , description=_description )
else:
lowercase = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
lowercase = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=lowerCAmelCase__ , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=lowerCAmelCase__ , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
lowercase = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=lowerCAmelCase__ , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase__ )
return parser
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(lowerCAmelCase__ ):
lowercase = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowercase = defaults.command_file
if not args.command and defaults.commands is not None:
lowercase = defaults.commands
if not args.tpu_name:
lowercase = defaults.tpu_name
if not args.tpu_zone:
lowercase = defaults.tpu_zone
if args.accelerate_version == "dev":
lowercase = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
lowercase = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , lowerCAmelCase__ ):
lowercase = f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
lowercase = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , lowerCAmelCase__ ):
lowercase = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowercase = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
lowercase = '''; '''.join(lowerCAmelCase__ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowercase = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(lowerCAmelCase__ )}' )
return
subprocess.run(lowerCAmelCase__ )
print('''Successfully setup pod.''' )
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = tpu_command_parser()
lowercase = parser.parse_args()
tpu_command_launcher(lowerCAmelCase__ )
| 633 | 0 |
def solution( limit = 100_0000 ):
    '''simple docstring'''
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4: # d must be divisble by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ): # since x,y,z are positive integers
                    frequency[n] += 1 # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(F'{solution() = }')
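# Explanatory note (added): writing the progression as x = a + d, y = a, z = a - d
# with a > d > 0 gives
#   n = x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a)
# so every admissible n is a multiple of a = first_term and d = (a + n / a) / 4.
# That is why `common_difference` must be divisible by 4, with first_term > d
# (so z > 0) and first_term < 4 * d (so n > 0) as the two remaining checks.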
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase__ :Optional[Any] = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase__ :List[str] = 10
lowercase__ :Tuple = 256
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if len(lowerCAmelCase__ ) < MIN_NUM_TOKENS:
return None
lowercase = MinHash(num_perm=lowerCAmelCase__ )
for token in set(lowerCAmelCase__ ):
min_hash.update(token.encode() )
return min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return {t for t in NON_ALPHA.split(lowerCAmelCase__ ) if len(t.strip() ) > 0}
class lowercase :
def __init__( self ,*,
A__ = 0.85 ,):
lowercase = duplication_jaccard_threshold
lowercase = NUM_PERM
lowercase = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm)
lowercase = defaultdict(A__)
def A__ ( self ,A__ ,A__):
lowercase = self._index.query(A__)
if code_key in self._index.keys:
print(f'Duplicate key {code_key}')
return
self._index.insert(A__ ,A__)
if len(A__) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(A__)
break
else:
self._duplicate_clusters[close_duplicates[0]].add(A__)
def A__ ( self):
lowercase = []
for base, duplicates in self._duplicate_clusters.items():
lowercase = [base] + list(A__)
# reformat the cluster to be a list of dict
lowercase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(A__)
return duplicate_clusters
def A__ ( self ,A__):
lowercase = self.get_duplicate_clusters()
with open(A__ ,'''w''') as f:
json.dump(A__ ,A__)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = element
lowercase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCAmelCase__ , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase__ ) ) , max_queue_size=100 ) ):
di.add(lowerCAmelCase__ , lowerCAmelCase__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = get_tokens(lowerCAmelCase__ )
lowercase = get_tokens(lowerCAmelCase__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
lowercase__ :List[Any] = None
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = []
for elementa in cluster:
lowercase = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
lowercase = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(lowerCAmelCase__ , lowerCAmelCase__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowercase = 1
extremes.append(lowerCAmelCase__ )
return extremes
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
global _shared_dataset
lowercase = dataset
lowercase = []
lowercase = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCAmelCase__ , lowerCAmelCase__ , ) , total=len(lowerCAmelCase__ ) , ):
extremes_list.append(lowerCAmelCase__ )
return extremes_list
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 0.85 ):
'''simple docstring'''
lowercase = make_duplicate_clusters(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
lowercase = {}
lowercase = find_extremes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for extremes in extremes_clusters:
for element in extremes:
lowercase = element
lowercase = duplicate_indices - set(extreme_dict.keys() )
lowercase = dataset.filter(lambda lowerCAmelCase__ , lowerCAmelCase__ : idx not in remove_indices , with_indices=lowerCAmelCase__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowercase = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
lowercase = extreme_dict[element['''base_index''']]['''copies''']
print(f'Original dataset size: {len(lowerCAmelCase__ )}' )
print(f'Number of duplicate clusters: {len(lowerCAmelCase__ )}' )
print(f'Files in duplicate cluster: {len(lowerCAmelCase__ )}' )
print(f'Unique files in duplicate cluster: {len(lowerCAmelCase__ )}' )
print(f'Filtered dataset size: {len(lowerCAmelCase__ )}' )
return ds_filter, duplicate_clusters
| 633 | 0 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
lowercase = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
lowercase = f'{src_lang}-{tgt_lang}'
lowercase = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
lowercase = os.path.join(lowerCAmelCase__ , '''README.md''' )
print(f'Generating {path}' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(lowerCAmelCase__ )
# make sure we are under the root of the project
lowercase__ :int = Path(__file__).resolve().parent.parent.parent
lowercase__ :Optional[Any] = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowercase__ :Any = model_name.split("-")
lowercase__ :Optional[int] = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 718 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Union[str, Any] =CLIPConfig
lowercase_ : str =['''CLIPEncoderLayer''']
def __init__( self ,A__):
super().__init__(A__)
lowercase = CLIPVisionModelWithProjection(config.vision_config)
lowercase = nn.Linear(config.vision_config.projection_dim ,1)
lowercase = nn.Linear(config.vision_config.projection_dim ,1)
@torch.no_grad()
def A__ ( self ,A__ ,A__ ,A__=0.5 ,A__=0.5):
lowercase = self.vision_model(A__)[0]
lowercase = self.p_head(A__)
lowercase = nsfw_detected.flatten()
lowercase = nsfw_detected > p_threshold
lowercase = nsfw_detected.tolist()
if any(A__):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''')
for idx, nsfw_detected_ in enumerate(A__):
if nsfw_detected_:
lowercase = np.zeros(images[idx].shape)
lowercase = self.w_head(A__)
lowercase = watermark_detected.flatten()
lowercase = watermark_detected > w_threshold
lowercase = watermark_detected.tolist()
if any(A__):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''')
for idx, watermark_detected_ in enumerate(A__):
if watermark_detected_:
lowercase = np.zeros(images[idx].shape)
return images, nsfw_detected, watermark_detected
| 633 | 0 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowercase__ :List[str] = logging.get_logger(__name__)
lowercase__ :Dict = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
lowercase__ :List[Any] = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
lowercase__ :Union[str, Any] = {
"jukebox": 512,
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Union[str, Any] =VOCAB_FILES_NAMES
lowercase_ : Any =PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Tuple =PRETRAINED_LYRIC_TOKENS_SIZES
lowercase_ : Tuple =['''input_ids''', '''attention_mask''']
def __init__( self ,A__ ,A__ ,A__ ,A__=["v3", "v2", "v2"] ,A__=5_1_2 ,A__=5 ,A__="<|endoftext|>" ,**A__ ,):
lowercase = AddedToken(A__ ,lstrip=A__ ,rstrip=A__) if isinstance(A__ ,A__) else unk_token
super().__init__(
unk_token=A__ ,n_genres=A__ ,version=A__ ,max_n_lyric_tokens=A__ ,**A__ ,)
lowercase = version
lowercase = max_n_lyric_tokens
lowercase = n_genres
with open(A__ ,encoding='''utf-8''') as vocab_handle:
lowercase = json.load(A__)
with open(A__ ,encoding='''utf-8''') as vocab_handle:
lowercase = json.load(A__)
with open(A__ ,encoding='''utf-8''') as vocab_handle:
lowercase = json.load(A__)
lowercase = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder) == 7_9:
lowercase = oov.replace(r'''\-\'''' ,r'''\-+\'''')
lowercase = regex.compile(A__)
lowercase = {v: k for k, v in self.artists_encoder.items()}
lowercase = {v: k for k, v in self.genres_encoder.items()}
lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def A__ ( self):
return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
def A__ ( self):
return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder)
def A__ ( self ,A__ ,A__ ,A__):
lowercase = [self.artists_encoder.get(A__ ,0) for artist in list_artists]
for genres in range(len(A__)):
lowercase = [self.genres_encoder.get(A__ ,0) for genre in list_genres[genres]]
lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
lowercase = [[self.lyrics_encoder.get(A__ ,0) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def A__ ( self ,A__):
return list(A__)
def A__ ( self ,A__ ,A__ ,A__ ,**A__):
lowercase , lowercase , lowercase = self.prepare_for_tokenization(A__ ,A__ ,A__)
lowercase = self._tokenize(A__)
return artist, genre, lyrics
def A__ ( self ,A__ ,A__ ,A__ ,A__ = False):
for idx in range(len(self.version)):
if self.version[idx] == "v3":
lowercase = artists[idx].lower()
lowercase = [genres[idx].lower()]
else:
lowercase = self._normalize(artists[idx]) + '''.v2'''
lowercase = [
self._normalize(A__) + '''.v2''' for genre in genres[idx].split('''_''')
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
lowercase = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''')
lowercase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
lowercase = {vocab[index]: index + 1 for index in range(len(A__))}
lowercase = 0
lowercase = len(A__) + 1
lowercase = self.vocab
lowercase = {v: k for k, v in self.vocab.items()}
lowercase = ''''''
else:
lowercase = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''')
lowercase = self._run_strip_accents(A__)
lowercase = lyrics.replace('''\\''' ,'''\n''')
lowercase = self.out_of_vocab.sub('''''' ,A__), [], []
return artists, genres, lyrics
def A__ ( self ,A__):
lowercase = unicodedata.normalize('''NFD''' ,A__)
lowercase = []
for char in text:
lowercase = unicodedata.category(A__)
if cat == "Mn":
continue
output.append(A__)
return "".join(A__)
def A__ ( self ,A__):
lowercase = (
[chr(A__) for i in range(ord('''a''') ,ord('''z''') + 1)]
+ [chr(A__) for i in range(ord('''A''') ,ord('''Z''') + 1)]
+ [chr(A__) for i in range(ord('''0''') ,ord('''9''') + 1)]
+ ['''.''']
)
lowercase = frozenset(A__)
lowercase = re.compile(r'''_+''')
lowercase = ''''''.join([c if c in accepted else '''_''' for c in text.lower()])
lowercase = pattern.sub('''_''' ,A__).strip('''_''')
return text
def A__ ( self ,A__):
return " ".join(A__)
def A__ ( self ,A__ ,A__ = None ,A__ = False):
# Convert to TensorType
if not isinstance(A__ ,A__):
lowercase = TensorType(A__)
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''')
import tensorflow as tf
lowercase = tf.constant
lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''')
import torch
lowercase = torch.tensor
lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''')
import jax.numpy as jnp # noqa: F811
lowercase = jnp.array
lowercase = _is_jax
else:
lowercase = np.asarray
lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
lowercase = [inputs]
if not is_tensor(A__):
lowercase = as_tensor(A__)
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''')
return inputs
def __call__( self ,A__ ,A__ ,A__="" ,A__="pt"):
lowercase = [0, 0, 0]
lowercase = [artist] * len(self.version)
lowercase = [genres] * len(self.version)
lowercase , lowercase , lowercase = self.tokenize(A__ ,A__ ,A__)
lowercase , lowercase , lowercase = self._convert_token_to_id(A__ ,A__ ,A__)
lowercase = [-INFINITY] * len(full_tokens[-1])
lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=A__)
for i in range(len(self.version))
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks})
def A__ ( self ,A__ ,A__ = None):
if not os.path.isdir(A__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
lowercase = os.path.join(
A__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''])
with open(A__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(json.dumps(self.artists_encoder ,ensure_ascii=A__))
lowercase = os.path.join(
A__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''])
with open(A__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(json.dumps(self.genres_encoder ,ensure_ascii=A__))
lowercase = os.path.join(
A__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''])
with open(A__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=A__))
return (artists_file, genres_file, lyrics_file)
def A__ ( self ,A__ ,A__ ,A__):
lowercase = self.artists_decoder.get(A__)
lowercase = [self.genres_decoder.get(A__) for genre in genres_index]
lowercase = [self.lyrics_decoder.get(A__) for character in lyric_index]
return artist, genres, lyrics
| 719 |
class Node:
    def __init__( self ,val):
        self.val = val
        self.left = None
        self.right = None
    def insert( self ,val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder( root , res ):
    '''simple docstring'''
    # Recursive traversal
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    '''simple docstring'''
    # Build BST
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(arr if False else root , res ) if False else inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
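    # Expected output of the call above (added note): the in-order walk of the BST
    # returns the values sorted ascending -> [1, 2, 3, 9, 10, 13, 14]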
| 633 | 0 |
def UpperCamelCase ( nums ):
    '''simple docstring'''
    if not nums: # Makes sure that the list is not empty
        raise ValueError('''List is empty''' )
    average = sum(nums ) / len(nums ) # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
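    # Worked example (added): for [1, 2, 3, 4] the mean is 2.5, the absolute
    # deviations are [1.5, 0.5, 0.5, 1.5], and the function returns 4.0 / 4 = 1.0.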
| 720 |
import os
def solution( filename = "input.txt" ):
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(''',''' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    columns = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(columns )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , columns ):
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
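# Added illustration (hedged, hand-worked): for the 2x2 matrix [[1, 2], [3, 4]] the
# j = 0 column is initialised to [1, 3]; after the left-to-right, top-down and
# bottom-up relaxations the last column holds [3, 7], so the minimal left-to-right
# path sum is 3 (enter at 1, move right through 2).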
| 633 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ :Tuple = logging.get_logger(__name__)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase__ ):
return [[videos]]
raise ValueError(f'Could not make batched video from {videos}' )
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[str] =['''pixel_values''']
def __init__( self ,A__ = True ,A__ = None ,A__ = PILImageResampling.BILINEAR ,A__ = True ,A__ = None ,A__ = True ,A__ = 1 / 2_5_5 ,A__ = True ,A__ = None ,A__ = None ,**A__ ,):
super().__init__(**A__)
lowercase = size if size is not None else {'''shortest_edge''': 2_2_4}
lowercase = get_size_dict(A__ ,default_to_square=A__)
lowercase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowercase = get_size_dict(A__ ,param_name='''crop_size''')
lowercase = do_resize
lowercase = size
lowercase = do_center_crop
lowercase = crop_size
lowercase = resample
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self ,A__ ,A__ ,A__ = PILImageResampling.BILINEAR ,A__ = None ,**A__ ,):
lowercase = get_size_dict(A__ ,default_to_square=A__)
if "shortest_edge" in size:
lowercase = get_resize_output_image_size(A__ ,size['''shortest_edge'''] ,default_to_square=A__)
elif "height" in size and "width" in size:
lowercase = (size['''height'''], size['''width'''])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
return resize(A__ ,size=A__ ,resample=A__ ,data_format=A__ ,**A__)
def A__ ( self ,A__ ,A__ ,A__ = None ,**A__ ,):
lowercase = get_size_dict(A__)
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
return center_crop(A__ ,size=(size['''height'''], size['''width''']) ,data_format=A__ ,**A__)
def A__ ( self ,A__ ,A__ ,A__ = None ,**A__ ,):
return rescale(A__ ,scale=A__ ,data_format=A__ ,**A__)
def A__ ( self ,A__ ,A__ ,A__ ,A__ = None ,**A__ ,):
return normalize(A__ ,mean=A__ ,std=A__ ,data_format=A__ ,**A__)
def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = ChannelDimension.FIRST ,):
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
lowercase = to_numpy_array(A__)
if do_resize:
lowercase = self.resize(image=A__ ,size=A__ ,resample=A__)
if do_center_crop:
lowercase = self.center_crop(A__ ,size=A__)
if do_rescale:
lowercase = self.rescale(image=A__ ,scale=A__)
if do_normalize:
lowercase = self.normalize(image=A__ ,mean=A__ ,std=A__)
lowercase = to_channel_dimension_format(A__ ,A__)
return image
def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = ChannelDimension.FIRST ,**A__ ,):
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = size if size is not None else self.size
lowercase = get_size_dict(A__ ,default_to_square=A__)
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(A__ ,param_name='''crop_size''')
if not valid_images(A__):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
lowercase = make_batched(A__)
lowercase = [
[
self._preprocess_image(
image=A__ ,do_resize=A__ ,size=A__ ,resample=A__ ,do_center_crop=A__ ,crop_size=A__ ,do_rescale=A__ ,rescale_factor=A__ ,do_normalize=A__ ,image_mean=A__ ,image_std=A__ ,data_format=A__ ,)
for img in video
]
for video in videos
]
lowercase = {'''pixel_values''': videos}
return BatchFeature(data=A__ ,tensor_type=A__)
| 721 |
from __future__ import annotations
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if len(lowerCAmelCase__ ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Dict , *lowercase : Union[str, Any] , **lowercase : Optional[int] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , lowercase , )
super().__init__(*lowercase , **lowercase )
| 634 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """esm"""
def __init__( self : Any , lowercase : Optional[Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=None , lowercase : Optional[int]=768 , lowercase : str=12 , lowercase : Union[str, Any]=12 , lowercase : Dict=3_072 , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Dict=1_026 , lowercase : Tuple=0.02 , lowercase : str=1E-1_2 , lowercase : Dict="absolute" , lowercase : Optional[Any]=True , lowercase : int=None , lowercase : int=False , lowercase : List[str]=False , lowercase : Tuple=None , lowercase : Tuple=None , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = emb_layer_norm_before
__lowercase = token_dropout
__lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowercase = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
__lowercase = EsmFoldConfig(**lowercase )
__lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowercase = get_default_vocab_list()
else:
__lowercase = vocab_list
else:
__lowercase = None
__lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
__lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : str = None
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : float = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : int = 128
lowercase__ : "TrunkConfig" = None
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowercase = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
__lowercase = TrunkConfig(**self.trunk )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.trunk.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 48
lowercase__ : int = 1_024
lowercase__ : int = 128
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : float = 0
lowercase__ : float = 0
lowercase__ : bool = False
lowercase__ : int = 4
lowercase__ : Optional[int] = 128
lowercase__ : "StructureModuleConfig" = None
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.structure_module is None:
__lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
__lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
F" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
__lowercase = self.sequence_state_dim // self.sequence_head_width
__lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.structure_module.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 384
lowercase__ : int = 128
lowercase__ : int = 16
lowercase__ : int = 128
lowercase__ : int = 12
lowercase__ : int = 4
lowercase__ : int = 8
lowercase__ : float = 0.1
lowercase__ : int = 8
lowercase__ : int = 1
lowercase__ : int = 2
lowercase__ : int = 7
lowercase__ : int = 10
lowercase__ : float = 1E-8
lowercase__ : float = 1E5
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return asdict(self )
def UpperCAmelCase__ ( ) -> List[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 634 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase__ = 16
UpperCamelCase__ = 32
def UpperCAmelCase__ ( lowercase__ , lowercase__ = 16 , lowercase__ = "bert-base-cased" ) -> Optional[int]:
__lowercase = AutoTokenizer.from_pretrained(lowercase__ )
__lowercase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ ):
# max_length=None => use the model max length (it's actually the default)
__lowercase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowercase = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowercase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowercase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
__lowercase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Optional[int]:
# Initialize accelerator
__lowercase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase = config["""lr"""]
__lowercase = int(config["""num_epochs"""] )
__lowercase = int(config["""seed"""] )
__lowercase = int(config["""batch_size"""] )
__lowercase = args.model_name_or_path
set_seed(lowercase__ )
__lowercase , __lowercase = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowercase = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
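# AdamW is used unless the DeepSpeed config already defines an optimizer, in which case accelerate's DummyOptim placeholder is passed through to `prepare` instead.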
__lowercase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowercase = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
__lowercase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowercase = 1
__lowercase = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowercase = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
__lowercase = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
__lowercase = 0
# We also need to keep track of the starting epoch so files are named properly
__lowercase = 0
# Now we train the model
__lowercase = evaluate.load("""glue""" , """mrpc""" )
__lowercase = 0
__lowercase = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
__lowercase = model(**lowercase__ )
__lowercase = outputs.loss
__lowercase = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__lowercase = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowercase = model(**lowercase__ )
__lowercase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowercase , __lowercase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
__lowercase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowercase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
__lowercase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , lowercase__ )
__lowercase = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
__lowercase = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def UpperCAmelCase__ ( ) -> Dict:
__lowercase = argparse.ArgumentParser(description="""Simple example of a training script that tracks model performance (eval accuracy) against an optional lower bound.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=lowercase__ , default=lowercase__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=3 , help="""Number of train epochs.""" , )
__lowercase = parser.parse_args()
__lowercase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 634 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Tuple = LxmertTokenizer
lowercase__ : List[str] = LxmertTokenizerFast
lowercase__ : Optional[Any] = True
lowercase__ : List[Any] = True
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case__ ( self : Optional[int] , lowercase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = """UNwant\u00E9d,running"""
__lowercase = """unwanted, running"""
return input_text, output_text
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [7, 4, 5, 10, 8, 9] )
def snake_case__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = tokenizer.tokenize(lowercase )
__lowercase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase )
__lowercase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
| 634 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def UpperCAmelCase__ ( lowercase__ ) -> int:
__lowercase = args.pruning_method
__lowercase = args.threshold
__lowercase = args.model_name_or_path.rstrip("""/""" )
__lowercase = args.target_model_path
print(F"Load fine-pruned model from {model_name_or_path}" )
__lowercase = torch.load(os.path.join(lowercase__ , """pytorch_model.bin""" ) )
__lowercase = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
__lowercase = tensor
print(F"Copied layer {name}" )
elif "classifier" in name or "qa_output" in name:
__lowercase = tensor
print(F"Copied layer {name}" )
elif "bias" in name:
__lowercase = tensor
print(F"Copied layer {name}" )
else:
if pruning_method == "magnitude":
__lowercase = MagnitudeBinarizer.apply(inputs=lowercase__ , threshold=lowercase__ )
__lowercase = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
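# Parameter names are expected to end in "weight"; dropping those 6 characters gives the prefix whose "mask_scores" entry holds the learned importance scores.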
__lowercase = name[:-6]
__lowercase = model[F"{prefix_}mask_scores"]
__lowercase = TopKBinarizer.apply(lowercase__ , lowercase__ )
__lowercase = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
__lowercase = name[:-6]
__lowercase = model[F"{prefix_}mask_scores"]
__lowercase = ThresholdBinarizer.apply(lowercase__ , lowercase__ , lowercase__ )
__lowercase = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
__lowercase = name[:-6]
__lowercase = model[F"{prefix_}mask_scores"]
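# L0 regularization uses a hard-concrete-style stretch: sigmoid scores in (0, 1) are mapped to (l, r) = (-0.1, 1.1) and then clamped back to [0, 1] to form the mask.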
__lowercase , __lowercase = -0.1, 1.1
__lowercase = torch.sigmoid(lowercase__ )
__lowercase = s * (r - l) + l
__lowercase = s_bar.clamp(min=0.0 , max=1.0 )
__lowercase = tensor * mask
print(F"Pruned layer {name}" )
else:
raise ValueError("""Unknown pruning method""" )
if target_model_path is None:
__lowercase = os.path.join(
os.path.dirname(lowercase__ ) , F"bertarized_{os.path.basename(lowercase__ )}" )
if not os.path.isdir(lowercase__ ):
shutil.copytree(lowercase__ , lowercase__ )
print(F"\nCreated folder {target_model_path}" )
torch.save(lowercase__ , os.path.join(lowercase__ , """pytorch_model.bin""" ) )
print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
UpperCamelCase__ = parser.parse_args()
main(args)
| 634 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> bool:
__lowercase = len(lowercase__ )
__lowercase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__lowercase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__lowercase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowercase = subset[i - 1][j]
if arr[i - 1] <= j:
__lowercase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"vocab_file": "spiece.model"}
UpperCamelCase__ = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , lowercase : int , lowercase : Dict=False , lowercase : int=True , lowercase : Optional[Any]=False , lowercase : Tuple="<s>" , lowercase : Tuple="</s>" , lowercase : Union[str, Any]="<unk>" , lowercase : str="<sep>" , lowercase : int="<pad>" , lowercase : Tuple="<cls>" , lowercase : Optional[int]="<mask>" , lowercase : Optional[Any]=["<eop>", "<eod>"] , lowercase : Optional[Dict[str, Any]] = None , **lowercase : Optional[Any] , ) -> None:
"""simple docstring"""
__lowercase = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
__lowercase = 3
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
__lowercase = jieba
__lowercase = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
return len(self.sp_model )
def snake_case__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> str:
"""simple docstring"""
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : List[str] , lowercase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self : str , lowercase : List[Any] ) -> List[Any]:
"""simple docstring"""
if self.remove_space:
__lowercase = """ """.join(inputs.strip().split() )
else:
__lowercase = inputs
__lowercase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__lowercase = unicodedata.normalize("""NFKD""" , lowercase )
__lowercase = """""".join([c for c in outputs if not unicodedata.combining(lowercase )] )
if self.do_lower_case:
__lowercase = outputs.lower()
return outputs
def snake_case__ ( self : List[Any] , lowercase : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.preprocess_text(lowercase )
__lowercase = self.sp_model.encode(lowercase , out_type=lowercase )
__lowercase = []
for piece in pieces:
if len(lowercase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowercase = cur_pieces[1:]
else:
__lowercase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowercase )
else:
new_pieces.append(lowercase )
return new_pieces
def snake_case__ ( self : Any , lowercase : Dict ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(lowercase )
def snake_case__ ( self : str , lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(lowercase )
def snake_case__ ( self : Optional[int] , lowercase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = """""".join(lowercase ).replace(lowercase , """ """ ).strip()
return out_string
def snake_case__ ( self : List[Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case__ ( self : Optional[int] , lowercase : List[int] , lowercase : Optional[List[int]] = None , lowercase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is not None:
return ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1, 1]
return ([0] * len(lowercase )) + [1, 1]
def snake_case__ ( self : int , lowercase : List[int] , lowercase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case__ ( self : Optional[int] , lowercase : str , lowercase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def snake_case__ ( self : Optional[int] , *lowercase : List[str] , **lowercase : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = super()._decode(*lowercase , **lowercase )
__lowercase = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 634 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """yolos"""
def __init__( self : Optional[int] , lowercase : Any=768 , lowercase : Tuple=12 , lowercase : Tuple=12 , lowercase : str=3_072 , lowercase : Optional[Any]="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.02 , lowercase : Optional[Any]=1E-1_2 , lowercase : Tuple=[512, 864] , lowercase : Optional[int]=16 , lowercase : Dict=3 , lowercase : Optional[Any]=True , lowercase : Optional[int]=100 , lowercase : Optional[int]=True , lowercase : Any=False , lowercase : Any=1 , lowercase : Any=5 , lowercase : List[str]=2 , lowercase : Union[str, Any]=5 , lowercase : str=2 , lowercase : Tuple=0.1 , **lowercase : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = version.parse("""1.11""" )
@property
def snake_case__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case__ ( self : int ) -> float:
"""simple docstring"""
return 1E-4
@property
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return 12
| 634 | 1 |
import math
from collections.abc import Callable
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> float:
__lowercase = xa
__lowercase = xa
while True:
if x_n == x_na or function(lowercase__ ) == function(lowercase__ ):
raise ZeroDivisionError("""float division by zero, could not find root""" )
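# Secant-method update: x_{n+1} = x_n - f(x_n) * (x_n - x_{n-1}) / (f(x_n) - f(x_{n-1}))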
__lowercase = x_na - (
function(lowercase__ ) / ((function(lowercase__ ) - function(lowercase__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
__lowercase = x_na
__lowercase = x_na
def UpperCAmelCase__ ( lowercase__ ) -> float:
return math.pow(lowercase__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 634 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = IFImgaImgSuperResolutionPipeline
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
lowercase__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def snake_case__ ( self : List[str] , lowercase : Optional[int] , lowercase : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(lowercase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(lowercase )
else:
__lowercase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self : Dict ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self._test_save_load_local()
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 634 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def snake_case__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowercase )
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self._create_example_records()
__lowercase = Dataset.from_list(lowercase )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowercase ):
self.assertDictEqual(lowercase , example_records[i] )
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self._create_example_records()
__lowercase = Dataset.from_list(lowercase )
__lowercase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def snake_case__ ( self : Tuple ) -> str: # checks what happens with missing columns
"""simple docstring"""
__lowercase = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__lowercase = Dataset.from_list(lowercase )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def snake_case__ ( self : int ) -> List[str]: # checks if the type can be inferred from the second record
"""simple docstring"""
__lowercase = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__lowercase = Dataset.from_list(lowercase )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def snake_case__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = Dataset.from_list([] )
self.assertEqual(len(lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 634 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
UpperCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def UpperCAmelCase__ ( lowercase__ ) -> int:
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__lowercase = model_type_to_module_name(lowercase__ )
__lowercase = importlib.import_module(F".{module_name}" , """transformers.models""" )
try:
return getattr(lowercase__ , lowercase__ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(lowercase__ , """__name__""" , lowercase__ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__lowercase = importlib.import_module("""transformers""" )
if hasattr(lowercase__ , lowercase__ ):
return getattr(lowercase__ , lowercase__ )
return None
def UpperCAmelCase__ ( lowercase__ , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , **lowercase__ , ) -> Union[str, Any]:
__lowercase = get_file_from_repo(
lowercase__ , lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , resume_download=lowercase__ , proxies=lowercase__ , use_auth_token=lowercase__ , revision=lowercase__ , local_files_only=lowercase__ , )
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(lowercase__ , encoding="""utf-8""" ) as reader:
return json.load(lowercase__ )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(lowercase )
def snake_case__ ( cls : str , lowercase : str , **lowercase : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = kwargs.pop("""config""" , lowercase )
__lowercase = kwargs.pop("""trust_remote_code""" , lowercase )
__lowercase = True
__lowercase , __lowercase = FeatureExtractionMixin.get_feature_extractor_dict(lowercase , **lowercase )
__lowercase = config_dict.get("""feature_extractor_type""" , lowercase )
__lowercase = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
__lowercase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowercase , lowercase ):
__lowercase = AutoConfig.from_pretrained(lowercase , **lowercase )
# It could be in `config.feature_extractor_type``
__lowercase = getattr(lowercase , """feature_extractor_type""" , lowercase )
if hasattr(lowercase , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
__lowercase = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
__lowercase = feature_extractor_class_from_name(lowercase )
__lowercase = feature_extractor_auto_map is not None
__lowercase = feature_extractor_class is not None or type(lowercase ) in FEATURE_EXTRACTOR_MAPPING
__lowercase = resolve_trust_remote_code(
lowercase , lowercase , lowercase , lowercase )
if has_remote_code and trust_remote_code:
__lowercase = get_class_from_dynamic_module(
lowercase , lowercase , **lowercase )
__lowercase = kwargs.pop("""code_revision""" , lowercase )
if os.path.isdir(lowercase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowercase , **lowercase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowercase , **lowercase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowercase ) in FEATURE_EXTRACTOR_MAPPING:
__lowercase = FEATURE_EXTRACTOR_MAPPING[type(lowercase )]
return feature_extractor_class.from_dict(lowercase , **lowercase )
raise ValueError(
F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def snake_case__ ( lowercase : List[str] , lowercase : Any ) -> Optional[Any]:
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(lowercase , lowercase )
| 634 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> int:
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("""String lengths must match!""" )
__lowercase = 0
for chara, chara in zip(lowercase__ , lowercase__ ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 1 |
def UpperCAmelCase__ ( lowercase__ ) -> list[int]:
if length <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise ValueError("""Length must be a positive integer.""" )
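# The n-th hexagonal number is n * (2n - 1).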
return [n * (2 * n - 1) for n in range(lowercase__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 634 |
from __future__ import annotations
from collections.abc import Callable
UpperCamelCase__ = list[list[float | int]]
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Matrix:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(size + 1 )] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for row in range(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = matrix[row][col]
__lowercase = vector[row][0]
__lowercase = 0
__lowercase = 0
while row < size and col < size:
# pivoting
__lowercase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase__ , lowercase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__lowercase , __lowercase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase__ ):
__lowercase = augmented[rowa][col] / augmented[row][col]
__lowercase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase__ ):
for row in range(lowercase__ ):
__lowercase = augmented[row][col] / augmented[col][col]
for cola in range(lowercase__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase__ )
]
def UpperCAmelCase__ ( lowercase__ ) -> Callable[[int], int]:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
__lowercase = [[0] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
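# Build a Vandermonde-style system from the sample points (x = 1..size) and solve it for the polynomial coefficients.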
for x_val, y_val in enumerate(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = (x_val + 1) ** (size - col - 1)
__lowercase = y_val
__lowercase = solve(lowercase__ , lowercase__ )
def interpolated_func(lowercase__ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase__ ) )
return interpolated_func
def UpperCAmelCase__ ( lowercase__ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCAmelCase__ ( lowercase__ = question_function , lowercase__ = 10 ) -> int:
__lowercase = [func(lowercase__ ) for x_val in range(1 , order + 1 )]
__lowercase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__lowercase = 0
__lowercase = 42
__lowercase = 42
for poly in polynomials:
__lowercase = 1
while func(lowercase__ ) == poly(lowercase__ ):
x_val += 1
ret += poly(lowercase__ )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 634 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
UpperCamelCase__ = Mapping[str, np.ndarray]
UpperCamelCase__ = Mapping[str, Any] # Is a nested dict.
UpperCamelCase__ = 0.01
@dataclasses.dataclass(frozen=_UpperCAmelCase )
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowercase__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowercase__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowercase__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowercase__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowercase__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowercase__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
lowercase__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
lowercase__ : Optional[Sequence[int]] = None
def UpperCAmelCase__ ( lowercase__ ) -> Protein:
__lowercase = r"""(\[[A-Z]+\]\n)"""
__lowercase = [tag.strip() for tag in re.split(lowercase__ , lowercase__ ) if len(lowercase__ ) > 0]
__lowercase = zip(tags[0::2] , [l.split("""\n""" ) for l in tags[1::2]] )
__lowercase = ["N", "CA", "C"]
__lowercase = None
__lowercase = None
__lowercase = None
for g in groups:
if "[PRIMARY]" == g[0]:
__lowercase = g[1][0].strip()
for i in range(len(lowercase__ ) ):
if seq[i] not in residue_constants.restypes:
__lowercase = """X""" # FIXME: strings are immutable
__lowercase = np.array(
[residue_constants.restype_order.get(lowercase__ , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__lowercase = []
for axis in range(3 ):
tertiary.append(list(map(lowercase__ , g[1][axis].split() ) ) )
__lowercase = np.array(lowercase__ )
__lowercase = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(lowercase__ ):
__lowercase = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__lowercase = np.array(list(map({"""-""": 0, """+""": 1}.get , g[1][0].strip() ) ) )
__lowercase = np.zeros(
(
len(lowercase__ ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(lowercase__ ):
__lowercase = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=lowercase__ , atom_mask=lowercase__ , aatype=lowercase__ , residue_index=np.arange(len(lowercase__ ) ) , b_factors=lowercase__ , )
def UpperCAmelCase__ ( lowercase__ , lowercase__ = 0 ) -> List[str]:
__lowercase = []
__lowercase = prot.remark
if remark is not None:
pdb_headers.append(F"REMARK {remark}" )
__lowercase = prot.parents
__lowercase = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__lowercase = [p for i, p in zip(lowercase__ , lowercase__ ) if i == chain_id]
if parents is None or len(lowercase__ ) == 0:
__lowercase = ["""N/A"""]
pdb_headers.append(F"PARENT {' '.join(lowercase__ )}" )
return pdb_headers
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> str:
__lowercase = []
__lowercase = pdb_str.split("""\n""" )
__lowercase = prot.remark
if remark is not None:
out_pdb_lines.append(F"REMARK {remark}" )
__lowercase = 42
if prot.parents is not None and len(prot.parents ) > 0:
__lowercase = []
if prot.parents_chain_index is not None:
__lowercase = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(lowercase__ ) , [] )
parent_dict[str(lowercase__ )].append(lowercase__ )
__lowercase = max([int(lowercase__ ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__lowercase = parent_dict.get(str(lowercase__ ) , ["""N/A"""] )
parents_per_chain.append(lowercase__ )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__lowercase = [["""N/A"""]]
def make_parent_line(lowercase__ ) -> str:
return F"PARENT {' '.join(lowercase__ )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__lowercase = 0
for i, l in enumerate(lowercase__ ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(lowercase__ )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(lowercase__ ):
__lowercase = parents_per_chain[chain_counter]
else:
__lowercase = ["""N/A"""]
out_pdb_lines.append(make_parent_line(lowercase__ ) )
return "\n".join(lowercase__ )
def UpperCAmelCase__ ( lowercase__ ) -> str:
__lowercase = residue_constants.restypes + ["""X"""]
def res_atoa(lowercase__ ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , """UNK""" )
__lowercase = residue_constants.atom_types
__lowercase = []
__lowercase = prot.atom_mask
__lowercase = prot.aatype
__lowercase = prot.atom_positions
__lowercase = prot.residue_index.astype(np.intaa )
__lowercase = prot.b_factors
__lowercase = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("""Invalid aatypes.""" )
__lowercase = get_pdb_headers(lowercase__ )
if len(lowercase__ ) > 0:
pdb_lines.extend(lowercase__ )
__lowercase = aatype.shape[0]
__lowercase = 1
__lowercase = 0
__lowercase = string.ascii_uppercase
__lowercase = None
# Add all atom sites.
for i in range(lowercase__ ):
__lowercase = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(lowercase__ , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__lowercase = """ATOM"""
__lowercase = atom_name if len(lowercase__ ) == 4 else F" {atom_name}"
__lowercase = """"""
__lowercase = """"""
__lowercase = 1.00
__lowercase = atom_name[0] # Protein supports only C, N, O, S, this works.
__lowercase = """"""
__lowercase = """A"""
if chain_index is not None:
__lowercase = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__lowercase = (
F"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
F"{res_name_a:>3} {chain_tag:>1}"
F"{residue_index[i]:>4}{insertion_code:>1} "
F"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
F"{occupancy:>6.2f}{b_factor:>6.2f} "
F"{element:>2}{charge:>2}"
)
pdb_lines.append(lowercase__ )
atom_index += 1
__lowercase = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__lowercase = True
__lowercase = chain_index[i + 1]
if should_terminate:
# Close the chain.
__lowercase = """TER"""
__lowercase = (
F"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(lowercase__ )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(lowercase__ , lowercase__ ) )
pdb_lines.append("""END""" )
pdb_lines.append("""""" )
return "\n".join(lowercase__ )
def UpperCAmelCase__ ( lowercase__ ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , ) -> Protein:
return Protein(
aatype=features["""aatype"""] , atom_positions=result["""final_atom_positions"""] , atom_mask=result["""final_atom_mask"""] , residue_index=features["""residue_index"""] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ) , chain_index=lowercase__ , remark=lowercase__ , parents=lowercase__ , parents_chain_index=lowercase__ , )
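# Added illustration (not part of the original module): the PDB writer above emits fixed-width
# ATOM records, where every column width matters. A minimal self-contained sketch of that
# formatting, with made-up values chosen only for demonstration:
record_type, atom_index, name, alt_loc = "ATOM", 1, " CA", ""
res_name_a, chain_tag, residue_idx, insertion_code = "ALA", "A", 1, ""
x, y, z, occupancy, b_factor, element, charge = 1.0, 2.0, 3.0, 1.00, 0.0, "C", ""
atom_line = (
    f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
    f"{res_name_a:>3} {chain_tag:>1}"
    f"{residue_idx:>4}{insertion_code:>1}   "
    f"{x:>8.3f}{y:>8.3f}{z:>8.3f}"
    f"{occupancy:>6.2f}{b_factor:>6.2f}          "
    f"{element:>2}{charge:>2}"
)
print(atom_line)  # prints one strictly column-aligned ATOM line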
| 634 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 1 |
def UpperCAmelCase__ ( number ) -> int:
    # Computes the next term of a Catalan-style sequence via C_n = C_{n-1} * (4n - 2) // (n + 1)
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 1:
        msg = F"Input value of [number={number}] must be > 0"
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
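# Added usage sketch (illustration only): the helper above applies the Catalan recurrence
# C_n = C_{n-1} * (4n - 2) // (n + 1), so successive values follow the Catalan sequence.
if __name__ == "__main__":
    for n in (1, 2, 3, 4, 5):
        print(n, UpperCAmelCase__(n))  # second column: 1, 1, 2, 5, 14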
| 634 |
import unittest
import numpy as np
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> np.ndarray:
__lowercase = np.shape(lowercase__ )
__lowercase = np.shape(lowercase__ )
__lowercase = np.shape(lowercase__ )
if shape_a[0] != shape_b[0]:
__lowercase = (
"""Expected the same number of rows for A and B. """
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(lowercase__ )
if shape_b[1] != shape_c[1]:
__lowercase = (
"""Expected the same number of columns for B and C. """
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(lowercase__ )
__lowercase = pseudo_inv
if a_inv is None:
try:
__lowercase = np.linalg.inv(lowercase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
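# Note (added for clarity): for the block matrix M = [[A, B], [B^T, C]] with A invertible, the
# value returned above is the Schur complement S = C - B^T @ A^{-1} @ B, and det(M) = det(A) * det(S);
# this is exactly the property the first unit test below checks.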
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Dict ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
__lowercase = schur_complement(lowercase , lowercase , lowercase )
__lowercase = np.block([[a, b], [b.T, c]] )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
self.assertAlmostEqual(lowercase , det_a * det_s )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 634 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def UpperCAmelCase__ ( lowercase__ ) -> Union[str, Any]:
__lowercase = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__lowercase = 128
elif "12-12" in model_name:
__lowercase = 12
__lowercase = 12
elif "14-14" in model_name:
__lowercase = 14
__lowercase = 14
elif "16-16" in model_name:
__lowercase = 16
__lowercase = 16
else:
raise ValueError("""Model not supported""" )
__lowercase = """huggingface/label-files"""
if "speech-commands" in model_name:
__lowercase = 35
__lowercase = """speech-commands-v2-id2label.json"""
else:
__lowercase = 527
__lowercase = """audioset-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowercase__ ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ ( lowercase__ ) -> Union[str, Any]:
if "module.v" in name:
__lowercase = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
__lowercase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
__lowercase = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
__lowercase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
__lowercase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__lowercase = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
__lowercase = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
__lowercase = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Any:
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(lowercase__ )
if "qkv" in key:
__lowercase = key.split(""".""" )
__lowercase = int(key_split[3] )
__lowercase = config.hidden_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[dim : dim * 2]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
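# Added illustration (not part of the original script): the loop above slices a fused qkv
# projection of shape (3 * hidden_size, hidden_size) into separate query / key / value weights.
# A minimal self-contained sketch of that slicing, runnable in isolation:
import numpy as np
_hidden = 4
_qkv = np.arange(3 * _hidden * _hidden).reshape(3 * _hidden, _hidden)
_q, _k, _v = _qkv[:_hidden, :], _qkv[_hidden : 2 * _hidden, :], _qkv[-_hidden:, :]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)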
def UpperCAmelCase__ ( lowercase__ ) -> List[str]:
__lowercase = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
@torch.no_grad()
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False ) -> str:
__lowercase = get_audio_spectrogram_transformer_config(lowercase__ )
__lowercase = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
__lowercase = model_name_to_url[model_name]
__lowercase = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" )
# remove some keys
remove_keys(lowercase__ )
# rename some keys
__lowercase = convert_state_dict(lowercase__ , lowercase__ )
# load 🤗 model
__lowercase = ASTForAudioClassification(lowercase__ )
model.eval()
model.load_state_dict(lowercase__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__lowercase = -4.2677393 if """speech-commands""" not in model_name else -6.845978
__lowercase = 4.5689974 if """speech-commands""" not in model_name else 5.5654526
__lowercase = 1_024 if """speech-commands""" not in model_name else 128
__lowercase = ASTFeatureExtractor(mean=lowercase__ , std=lowercase__ , max_length=lowercase__ )
if "speech-commands" in model_name:
__lowercase = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
__lowercase = dataset[0]["""audio"""]["""array"""]
else:
__lowercase = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
__lowercase , __lowercase = torchaudio.load(lowercase__ )
__lowercase = waveform.squeeze().numpy()
__lowercase = feature_extractor(lowercase__ , sampling_rate=16_000 , return_tensors="""pt""" )
# forward pass
__lowercase = model(**lowercase__ )
__lowercase = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__lowercase = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__lowercase = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__lowercase = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__lowercase = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__lowercase = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__lowercase = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__lowercase = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__lowercase = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , lowercase__ , atol=1E-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase__ )
print(F"Saving feature extractor to {pytorch_dump_folder_path}" )
feature_extractor.save_pretrained(lowercase__ )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(F"MIT/{model_name}" )
feature_extractor.push_to_hub(F"MIT/{model_name}" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
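# Added usage sketch (illustration only; the script file name below is an assumption, adapt it
# to however this file is saved locally):
#   python convert_audio_spectrogram_transformer_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub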
| 634 |
import random
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ = False ) -> dict:
__lowercase = {i: [] for i in range(lowercase__ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(lowercase__ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is smaller than the given probability
for i in range(lowercase__ ):
for j in range(i + 1 , lowercase__ ):
if random.random() < probability:
graph[i].append(lowercase__ )
if not directed:
                # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowercase__ )
return graph
def UpperCAmelCase__ ( lowercase__ ) -> dict:
return {
i: [j for j in range(lowercase__ ) if i != j] for i in range(lowercase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
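# Added usage sketch (illustration only; `random_graph` and `complete_graph` are the intended
# names of the two helpers above, which appear here under obfuscated identifiers):
#   random_graph(5, 0.5)    # Erdos-Renyi style G(n, p) adjacency lists, e.g. {0: [2, 4], ...}
#   complete_graph(4)       # {0: [1, 2, 3], 1: [0, 2, 3], 2: [0, 1, 3], 3: [0, 1, 2]}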
| 634 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , lowercase : Dict , lowercase : List[str]=3 , lowercase : Dict=32 , lowercase : Union[str, Any]=3 , lowercase : Tuple=10 , lowercase : List[str]=[10, 20, 30, 40] , lowercase : List[str]=[1, 1, 2, 1] , lowercase : Dict=True , lowercase : int=True , lowercase : Optional[int]="relu" , lowercase : Optional[int]=3 , lowercase : Union[str, Any]=None , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = embeddings_size
__lowercase = hidden_sizes
__lowercase = depths
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_act
__lowercase = num_labels
__lowercase = scope
__lowercase = len(lowercase )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case__ ( self : int , lowercase : List[str] , lowercase : Dict , lowercase : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFResNetModel(config=lowercase )
__lowercase = model(lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case__ ( self : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : List[str] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFResNetForImageClassification(lowercase )
__lowercase = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase__ : Tuple = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase__ : List[str] = False
lowercase__ : List[Any] = False
lowercase__ : List[str] = False
lowercase__ : Optional[Any] = False
lowercase__ : Optional[int] = False
def snake_case__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = TFResNetModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
def snake_case__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(lowercase : str , lowercase : List[str] , lowercase : Any ):
__lowercase = model_class(lowercase )
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowercase = layer_type
__lowercase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def snake_case__ ( self : Dict ) -> Dict:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFResNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCAmelCase__ ( ) -> str:
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowercase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**lowercase )
# verify the logits
__lowercase = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase )
__lowercase = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase , atol=1E-4 ) )
| 634 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
UpperCamelCase__ = random.Random()
def UpperCAmelCase__ ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ) -> str:
if rng is None:
__lowercase = global_rng
__lowercase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowercase : Tuple , lowercase : Union[str, Any]=7 , lowercase : List[Any]=400 , lowercase : Any=2_000 , lowercase : Optional[int]=24 , lowercase : Any=24 , lowercase : List[str]=0.0 , lowercase : Dict=16_000 , lowercase : Union[str, Any]=True , lowercase : Dict=True , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = min_seq_length
__lowercase = max_seq_length
__lowercase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowercase = feature_size
__lowercase = num_mel_bins
__lowercase = padding_value
__lowercase = sampling_rate
__lowercase = return_attention_mask
__lowercase = do_normalize
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self : List[str] , lowercase : Tuple=False , lowercase : int=False ) -> Optional[Any]:
"""simple docstring"""
def _flatten(lowercase : Optional[Any] ):
return list(itertools.chain(*lowercase ) )
if equal_length:
__lowercase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowercase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowercase = [np.asarray(lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = SpeechaTextFeatureExtractor if is_speech_available() else None
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = SpeechaTextFeatureExtractionTester(self )
def snake_case__ ( self : Tuple , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowercase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0 ) - 1 ) < 1E-3 ) )
def snake_case__ ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = [np.asarray(lowercase ) for speech_input in speech_inputs]
# Test feature size
__lowercase = feature_extractor(lowercase , padding=lowercase , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__lowercase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test batched
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowercase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowercase = np.asarray(lowercase )
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , padding=lowercase , max_length=lowercase , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , max_length=lowercase , padding=lowercase , return_tensors="""np""" , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""max_length""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=16 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
import torch
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = np.random.rand(100 , 32 ).astype(np.floataa )
__lowercase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self : Optional[int] , lowercase : Union[str, Any] ) -> int:
"""simple docstring"""
from datasets import load_dataset
__lowercase = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__lowercase = ds.sort("""id""" ).select(range(lowercase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
__lowercase = self._load_datasamples(1 )
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = feature_extractor(lowercase , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase , atol=1E-4 ) )
| 634 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 634 |
def sum_of_series( first_term , common_diff , num_of_terms ) -> float:
    # formula for the sum of an arithmetic series: S_n = (n / 2) * (2a + (n - 1) * d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
def UpperCAmelCase__ ( ) -> None:
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
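# Added worked example (illustration only): for first_term=1, common_diff=1, num_of_terms=10
# the formula gives (10 / 2) * (2 * 1 + 9 * 1) = 55.0.
assert sum_of_series(1, 1, 10) == 55.0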
| 634 | 1 |
from random import randint, random
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False , lowercase__ = False , lowercase__ = 5 , ) -> list:
__lowercase = [[-1] * number_of_cells] # Create a highway without any car
__lowercase = 0
__lowercase = max(lowercase__ , 0 )
while i < number_of_cells:
__lowercase = (
randint(0 , lowercase__ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> int:
__lowercase = 0
__lowercase = highway_now[car_index + 1 :]
for cell in range(len(lowercase__ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowercase__ , -1 )
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> list:
__lowercase = len(lowercase__ )
    # Before the calculations, the next state of the highway is empty
__lowercase = [-1] * number_of_cells
for car_index in range(lowercase__ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
__lowercase = min(highway_now[car_index] + 1 , lowercase__ )
            # Number of empty cells before the next car
__lowercase = get_distance(lowercase__ , lowercase__ ) - 1
# We can't have the car causing an accident
__lowercase = min(next_highway[car_index] , lowercase__ )
if random() < probability:
# Randomly, a driver will slow down
__lowercase = max(next_highway[car_index] - 1 , 0 )
return next_highway
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> list:
__lowercase = len(highway[0] )
for i in range(lowercase__ ):
__lowercase = update(highway[i] , lowercase__ , lowercase__ )
__lowercase = [-1] * number_of_cells
for car_index in range(lowercase__ ):
__lowercase = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
__lowercase = (car_index + speed) % number_of_cells
# Commit the change of position
__lowercase = speed
highway.append(lowercase__ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
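# Added usage sketch (illustration only; `construct_highway` and `simulate` are the intended
# names of the outer helpers above, which appear here under obfuscated identifiers):
#   highway = construct_highway(20, 4, 1)       # 20 cells, a car roughly every 4 cells, speed 1
#   evolution = simulate(highway, 5, 5, 0.3)    # 5 update steps, max speed 5, 30% random braking
#   # `evolution` holds one list of per-cell speeds for every simulated time step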
| 634 |
def solution( n = 100 ) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
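# Added worked example (illustration only): for n = 10 the square of the sum is 55 ** 2 = 3025
# and the sum of the squares is 385, so the helper returns 3025 - 385 = 2640.
assert solution(10) == 2_640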
| 634 | 1 |
import random
def UpperCAmelCase__ ( lowercase__ ) -> bool:
__lowercase = num - 1
__lowercase = 0
while s % 2 == 0:
__lowercase = s // 2
t += 1
for _ in range(5 ):
__lowercase = random.randrange(2 , num - 1 )
__lowercase = pow(lowercase__ , lowercase__ , lowercase__ )
if v != 1:
__lowercase = 0
while v != (num - 1):
if i == t - 1:
return False
else:
__lowercase = i + 1
__lowercase = (v**2) % num
return True
def UpperCAmelCase__ ( lowercase__ ) -> bool:
if num < 2:
return False
__lowercase = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowercase__ )
def UpperCAmelCase__ ( lowercase__ = 1_024 ) -> int:
while True:
__lowercase = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(lowercase__ ):
return num
if __name__ == "__main__":
UpperCamelCase__ = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
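# Added usage sketch (illustration only; `rabin_miller`, `is_prime_low_num` and
# `generate_large_prime` are the intended names of the three helpers above, which appear here
# under obfuscated identifiers):
#   is_prime_low_num(97)           # True: quick screen against small primes, then a Rabin-Miller test
#   generate_large_prime(1_024)    # a probable prime with roughly 1024 bits, e.g. for toy RSA keys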
| 634 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__="dummy_doc" ) -> str:
__lowercase = {doc: key_lines}
__lowercase = {doc: sys_lines}
__lowercase = {}
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , key_doc_lines[doc] , lowercase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , sys_doc_lines[doc] , lowercase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
if remove_nested:
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
__lowercase = get_coref_infos(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowercase = {}
__lowercase = 0
__lowercase = 0
for name, metric in metrics:
__lowercase , __lowercase , __lowercase = evaluator.evaluate_documents(lowercase__ , lowercase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
__lowercase = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def UpperCAmelCase__ ( lowercase__ ) -> List[Any]:
__lowercase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
__lowercase = line.split()[5]
if not parse_col == "-":
__lowercase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case__ ( self : Tuple , lowercase : Dict , lowercase : Optional[int] , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int=False , lowercase : Dict=False ) -> str:
"""simple docstring"""
__lowercase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowercase = util.check_gold_parse_annotation(lowercase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 634 | 1 |
import math
def UpperCAmelCase__ ( lowercase__ ) -> bool:
__lowercase = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(lowercase__ )
def UpperCAmelCase__ ( lowercase__ = 1 / 12_345 ) -> int:
__lowercase = 0
__lowercase = 0
__lowercase = 3
while True:
__lowercase = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(lowercase__ ):
__lowercase = int(lowercase__ )
total_partitions += 1
if check_partition_perfect(lowercase__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(lowercase__ )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 634 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def UpperCAmelCase__ ( value , from_type , to_type ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        __lowercase = (
            F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            F"Valid values are: {', '.join(ENERGY_CONVERSION )}"
        )
        raise ValueError(__lowercase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
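# Added usage sketch (illustration only): converting 1 kilowatt-hour to joules with the
# conversion helper above.
assert UpperCAmelCase__(1, "kilowatthour", "joule") == 3_600_000.0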
| 634 | 1 |
from __future__ import annotations
def UpperCAmelCase__ ( lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ) -> tuple[int, float, str]:
__lowercase = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
__lowercase = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
__lowercase = frequencies_dict
if not case_sensitive:
__lowercase = ciphertext.lower()
# Chi squared statistic values
__lowercase = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
__lowercase = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
__lowercase = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
__lowercase = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
__lowercase = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
__lowercase = decrypted_with_shift.lower().count(lowercase__ )
                    # Get the expected amount of times the letter should appear based
# on letter frequencies
__lowercase = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__lowercase = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
__lowercase = decrypted_with_shift.count(lowercase__ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__lowercase = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__lowercase = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
__lowercase = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase__ ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
__lowercase = min(
lowercase__ , key=lowercase__ , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
__lowercase
) , (
__lowercase
) ,
) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
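# Standalone sketch (an illustration, not the sample's own helper) of the
# chi-squared scoring idea used above: compare observed letter counts in a
# candidate plaintext with the counts expected from English letter frequencies.
# The sample above uses a slightly different expected-count formula, so this is
# only a textbook restatement.
def chi_squared_score(candidate: str, frequencies: dict) -> float:
    score = 0.0
    for letter, freq in frequencies.items():
        observed = candidate.count(letter)
        expected = freq * len(candidate)
        if expected > 0:
            score += (observed - expected) ** 2 / expected
    return score
# The shift whose decryption yields the lowest score is the most likely key.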
| 634 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : str = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowercase__ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
lowercase__ : ClassVar[Features] = Features({"""summary""": Value("""string""" )} )
lowercase__ : str = "text"
lowercase__ : str = "summary"
@property
def snake_case__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
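# Usage note (an assumption for illustration; in the un-obfuscated `datasets`
# library this template is `Summarization` and the property above is
# `column_mapping`): constructing it with custom column names yields, e.g.
# Summarization(text_column="article", summary_column="highlights").column_mapping
#   -> {"article": "text", "highlights": "summary"}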
| 634 | 1 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any , lowercase : Dict , lowercase : List[str] , lowercase : List[Any] , lowercase : str , lowercase : Optional[Any] , lowercase : Optional[int]=0.2 , lowercase : Union[str, Any]=0.2 ) -> Any:
"""simple docstring"""
__lowercase = bp_numa
__lowercase = bp_numa
__lowercase = bp_numa
__lowercase = conva_get[:2]
__lowercase = conva_get[2]
__lowercase = size_pa
__lowercase = rate_w
__lowercase = rate_t
__lowercase = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
__lowercase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__lowercase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__lowercase = -2 * np.random.rand(self.conva[1] ) + 1
__lowercase = -2 * np.random.rand(self.num_bpa ) + 1
__lowercase = -2 * np.random.rand(self.num_bpa ) + 1
def snake_case__ ( self : Union[str, Any] , lowercase : int ) -> int:
"""simple docstring"""
__lowercase = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowercase , """wb""" ) as f:
pickle.dump(lowercase , lowercase )
print(F"Model saved: {save_path}" )
@classmethod
def snake_case__ ( cls : Any , lowercase : Optional[Any] ) -> Dict:
"""simple docstring"""
with open(lowercase , """rb""" ) as f:
__lowercase = pickle.load(lowercase ) # noqa: S301
__lowercase = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
__lowercase = model_dic.get("""size_pooling1""" )
__lowercase = model_dic.get("""num_bp1""" )
__lowercase = model_dic.get("""num_bp2""" )
__lowercase = model_dic.get("""num_bp3""" )
__lowercase = model_dic.get("""rate_weight""" )
__lowercase = model_dic.get("""rate_thre""" )
# create model instance
__lowercase = CNN(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
# modify model parameter
__lowercase = model_dic.get("""w_conv1""" )
__lowercase = model_dic.get("""wkj""" )
__lowercase = model_dic.get("""vji""" )
__lowercase = model_dic.get("""thre_conv1""" )
__lowercase = model_dic.get("""thre_bp2""" )
__lowercase = model_dic.get("""thre_bp3""" )
return conv_ins
def snake_case__ ( self : Tuple , lowercase : List[str] ) -> List[str]:
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x ))
def snake_case__ ( self : int , lowercase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return round(lowercase , 3 )
def snake_case__ ( self : Dict , lowercase : List[str] , lowercase : Dict , lowercase : str , lowercase : Any , lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = convs[0]
__lowercase = convs[1]
__lowercase = np.shape(lowercase )[0]
# get the data slice of original image data, data_focus
__lowercase = []
for i_focus in range(0 , size_data - size_conv + 1 , lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , lowercase ):
__lowercase = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowercase )
        # calculate the feature map of every single kernel, and save it as a list of matrices
__lowercase = []
__lowercase = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowercase ):
__lowercase = []
for i_focus in range(len(lowercase ) ):
__lowercase = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowercase ) )
__lowercase = np.asmatrix(lowercase ).reshape(
lowercase , lowercase )
data_featuremap.append(lowercase )
        # expanding the data slice to one dimension
__lowercase = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowercase ) )
__lowercase = np.asarray(lowercase )
return focus_list, data_featuremap
def snake_case__ ( self : Optional[Any] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : Optional[int]="average_pool" ) -> Tuple:
"""simple docstring"""
__lowercase = len(featuremaps[0] )
__lowercase = int(size_map / size_pooling )
__lowercase = []
for i_map in range(len(lowercase ) ):
__lowercase = featuremaps[i_map]
__lowercase = []
for i_focus in range(0 , lowercase , lowercase ):
for j_focus in range(0 , lowercase , lowercase ):
__lowercase = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowercase ) )
__lowercase = np.asmatrix(lowercase ).reshape(lowercase , lowercase )
featuremap_pooled.append(lowercase )
return featuremap_pooled
def snake_case__ ( self : Optional[int] , lowercase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = []
for i in range(len(lowercase ) ):
__lowercase = np.shape(data[i] )
__lowercase = data[i].reshape(1 , shapes[0] * shapes[1] )
__lowercase = data_listed.getA().tolist()[0]
data_expanded.extend(lowercase )
__lowercase = np.asarray(lowercase )
return data_expanded
def snake_case__ ( self : Optional[Any] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = np.asarray(lowercase )
__lowercase = np.shape(lowercase )
__lowercase = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def snake_case__ ( self : Optional[Any] , lowercase : int , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = []
__lowercase = 0
for i_map in range(lowercase ):
__lowercase = np.ones((size_map, size_map) )
for i in range(0 , lowercase , lowercase ):
for j in range(0 , lowercase , lowercase ):
__lowercase = pd_pool[
i_pool
]
__lowercase = i_pool + 1
__lowercase = np.multiply(
lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowercase )
return pd_all
def snake_case__ ( self : Any , lowercase : Any , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : str=bool ) -> List[str]:
"""simple docstring"""
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowercase )) )
print((""" - - Shape: Teach_Data """, np.shape(lowercase )) )
__lowercase = 0
__lowercase = []
__lowercase = 10_000
while rp < n_repeat and mse >= error_accuracy:
__lowercase = 0
print(F"-------------Learning Time {rp}--------------" )
for p in range(len(lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
__lowercase = np.asmatrix(datas_train[p] )
__lowercase = np.asarray(datas_teach[p] )
__lowercase , __lowercase = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowercase = self.pooling(lowercase , self.size_poolinga )
__lowercase = np.shape(lowercase )
__lowercase = self._expand(lowercase )
__lowercase = data_bp_input
__lowercase = np.dot(lowercase , self.vji.T ) - self.thre_bpa
__lowercase = self.sig(lowercase )
__lowercase = np.dot(lowercase , self.wkj.T ) - self.thre_bpa
__lowercase = self.sig(lowercase )
            # --------------Model Learning ------------------------
# calculate error and gradient---------------
__lowercase = np.multiply(
(data_teach - bp_outa) , np.multiply(lowercase , (1 - bp_outa) ) )
__lowercase = np.multiply(
np.dot(lowercase , self.wkj ) , np.multiply(lowercase , (1 - bp_outa) ) )
__lowercase = np.dot(lowercase , self.vji )
__lowercase = pd_i_all / (self.size_poolinga * self.size_poolinga)
__lowercase = pd_conva_pooled.T.getA().tolist()
__lowercase = self._calculate_gradient_from_pool(
lowercase , lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
__lowercase = self._expand_mat(pd_conva_all[k_conv] )
__lowercase = self.rate_weight * np.dot(lowercase , lowercase )
__lowercase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
__lowercase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
__lowercase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__lowercase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__lowercase = self.thre_bpa - pd_k_all * self.rate_thre
__lowercase = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
__lowercase = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__lowercase = rp + 1
__lowercase = error_count / patterns
all_mse.append(lowercase )
def draw_error():
__lowercase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowercase , """+-""" )
plt.plot(lowercase , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowercase , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def snake_case__ ( self : Optional[int] , lowercase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowercase )) )
for p in range(len(lowercase ) ):
__lowercase = np.asmatrix(datas_test[p] )
__lowercase , __lowercase = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowercase = self.pooling(lowercase , self.size_poolinga )
__lowercase = self._expand(lowercase )
__lowercase = data_bp_input
__lowercase = bp_outa * self.vji.T - self.thre_bpa
__lowercase = self.sig(lowercase )
__lowercase = bp_outa * self.wkj.T - self.thre_bpa
__lowercase = self.sig(lowercase )
produce_out.extend(bp_outa.getA().tolist() )
__lowercase = [list(map(self.do_round , lowercase ) ) for each in produce_out]
return np.asarray(lowercase )
def snake_case__ ( self : Optional[int] , lowercase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = np.asmatrix(lowercase )
__lowercase , __lowercase = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowercase = self.pooling(lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
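# Standalone sketch (not the sample's own API) of the average-pooling step that
# the pooling method above implements with Python loops; the block means below
# are the same values it appends for pooling_type == "average_pool".
import numpy as np
def average_pool(feature_map, size):
    fm = np.asarray(feature_map, dtype=float)
    h, w = fm.shape
    fm = fm[: h - h % size, : w - w % size]
    return fm.reshape(h // size, size, w // size, size).mean(axis=(1, 3))
# average_pool(np.arange(16).reshape(4, 4), 2) -> [[2.5, 4.5], [10.5, 12.5]]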
| 634 |
def UpperCAmelCase__ ( lowercase__ ) -> Optional[int]:
__lowercase = len(lowercase__ )
__lowercase = sum(lowercase__ )
__lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__lowercase = True
for i in range(1 , s + 1 ):
__lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
__lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__lowercase = s - 2 * j
break
return diff
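# Readable restatement (illustrative only; the sample's function keeps its
# obfuscated name): the DP above finds the minimum difference between the sums
# of two subsets that partition the array.
def min_partition_difference(arr):
    total = sum(arr)
    reachable = {0}
    for x in arr:
        reachable |= {s + x for s in reachable}
    return min(abs(total - 2 * s) for s in reachable)
# min_partition_difference([1, 6, 11, 5]) -> 1   ({1, 5, 6} vs {11})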
| 634 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : Optional[int] , lowercase : List[str]=2 , lowercase : List[Any]=3 , lowercase : List[str]=4 , lowercase : Union[str, Any]=2 , lowercase : str=7 , lowercase : List[str]=True , lowercase : Dict=True , lowercase : Optional[int]=True , lowercase : Optional[int]=True , lowercase : Optional[Any]=99 , lowercase : Optional[int]=36 , lowercase : Optional[int]=3 , lowercase : int=4 , lowercase : List[str]=37 , lowercase : List[Any]="gelu" , lowercase : List[str]=0.1 , lowercase : Optional[int]=0.1 , lowercase : Union[str, Any]=512 , lowercase : Tuple=16 , lowercase : Optional[int]=2 , lowercase : Tuple=0.02 , lowercase : List[Any]=6 , lowercase : List[Any]=6 , lowercase : Optional[int]=3 , lowercase : Dict=4 , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=1_000 , ) -> Dict:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = patch_size
__lowercase = text_seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowercase = text_seq_length
__lowercase = (image_size // patch_size) ** 2 + 1
__lowercase = self.text_seq_length + self.image_seq_length
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowercase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowercase = bbox[i, j, 3]
__lowercase = bbox[i, j, 1]
__lowercase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowercase = bbox[i, j, 2]
__lowercase = bbox[i, j, 0]
__lowercase = t
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowercase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self : Tuple , lowercase : List[str] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : List[Any] , lowercase : List[str] , lowercase : int ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaModel(config=lowercase )
model.to(lowercase )
model.eval()
# text + image
__lowercase = model(lowercase , pixel_values=lowercase )
__lowercase = model(
lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase )
__lowercase = model(lowercase , bbox=lowercase , pixel_values=lowercase , token_type_ids=lowercase )
__lowercase = model(lowercase , bbox=lowercase , pixel_values=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowercase = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowercase = model(pixel_values=lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def snake_case__ ( self : List[Any] , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : str , lowercase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = LayoutLMvaForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(
lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Dict , lowercase : List[str] , lowercase : int , lowercase : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = LayoutLMvaForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(
lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def snake_case__ ( self : Optional[int] , lowercase : Optional[int] , lowercase : Tuple , lowercase : str , lowercase : Optional[int] , lowercase : List[Any] , lowercase : str , lowercase : Optional[int] , lowercase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = LayoutLMvaForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(
lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : int = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase__ : Optional[int] = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def snake_case__ ( self : Optional[Any] , lowercase : Optional[Any] , lowercase : Tuple , lowercase : Tuple , lowercase : Optional[Any] , lowercase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return True
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = LayoutLMvaModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def snake_case__ ( self : str , lowercase : List[Any] , lowercase : List[str] , lowercase : str=False ) -> Optional[int]:
"""simple docstring"""
__lowercase = copy.deepcopy(lowercase )
if model_class in get_values(lowercase ):
__lowercase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowercase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowercase ):
__lowercase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowercase )
elif model_class in get_values(lowercase ):
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
elif model_class in [
*get_values(lowercase ),
]:
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
elif model_class in [
*get_values(lowercase ),
]:
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowercase , )
return inputs_dict
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase )
def snake_case__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
@slow
def snake_case__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = LayoutLMvaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCAmelCase__ ( ) -> int:
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : Any ) -> List[str]:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=lowercase ) if is_vision_available() else None
@slow
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(lowercase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowercase , return_tensors="""pt""" ).pixel_values.to(lowercase )
__lowercase = torch.tensor([[1, 2]] )
__lowercase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__lowercase = model(
input_ids=input_ids.to(lowercase ) , bbox=bbox.to(lowercase ) , pixel_values=pixel_values.to(lowercase ) , )
# verify the logits
__lowercase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , lowercase )
__lowercase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ) )
| 634 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : Tuple=32 , lowercase : Optional[Any]=2 , lowercase : Tuple=3 , lowercase : Tuple=16 , lowercase : Tuple=[1, 2, 1] , lowercase : Optional[Any]=[2, 2, 4] , lowercase : Dict=2 , lowercase : Optional[int]=2.0 , lowercase : List[Any]=True , lowercase : str=0.0 , lowercase : Any=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : Tuple=False , lowercase : Optional[Any]=True , lowercase : int=0.02 , lowercase : Union[str, Any]=1E-5 , lowercase : Dict=True , lowercase : Any=None , lowercase : str=True , lowercase : str=10 , lowercase : Dict=8 , lowercase : int=["stage1", "stage2", "stage3"] , lowercase : Optional[int]=[1, 2, 3] , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case__ ( self : Any , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Any , lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowercase ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=lowercase )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase__ : List[str] = False
lowercase__ : int = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowercase : Optional[int] ):
__lowercase = 0
return t
def check_equivalence(lowercase : Optional[int] , lowercase : str , lowercase : str , lowercase : Tuple={} ):
with torch.no_grad():
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase )
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : int , lowercase : Optional[Any] ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has"
F" `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}."
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__ : Any = MaskFormerSwinConfig
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowercase )
backbone.to(lowercase )
backbone.eval()
__lowercase = backbone(**lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowercase , output_hidden_states=lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowercase , output_attentions=lowercase )
self.assertIsNotNone(outputs.attentions )
| 634 | 1 |
import math
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , lowercase : List[str]=0 ) -> str: # a graph with Node 0,1,...,N-1
"""simple docstring"""
__lowercase = n
__lowercase = [
[math.inf for j in range(0 , lowercase )] for i in range(0 , lowercase )
] # adjacency matrix for weight
__lowercase = [
[math.inf for j in range(0 , lowercase )] for i in range(0 , lowercase )
] # dp[i][j] stores minimum distance from i to j
def snake_case__ ( self : Dict , lowercase : Tuple , lowercase : Any , lowercase : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = w
def snake_case__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__lowercase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def snake_case__ ( self : int , lowercase : str , lowercase : int ) -> Any:
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
UpperCamelCase__ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
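# Note (illustration, assuming the obfuscated assignments behave like standard
# Floyd-Warshall): with the edges above, show_min(1, 4) is 11 (1 -> 3 -> 4) and
# show_min(0, 3) is 16 (0 -> 2 -> 3); the calls above discard these return values.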
| 634 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCamelCase__ = "scheduler_config.json"
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[Any] = 1
lowercase__ : Tuple = 2
lowercase__ : Union[str, Any] = 3
lowercase__ : Union[str, Any] = 4
lowercase__ : str = 5
lowercase__ : Any = 6
lowercase__ : Any = 7
lowercase__ : List[str] = 8
lowercase__ : Union[str, Any] = 9
lowercase__ : int = 10
lowercase__ : List[str] = 11
lowercase__ : List[Any] = 12
lowercase__ : str = 13
lowercase__ : Optional[int] = 14
@dataclass
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : torch.FloatTensor
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : Optional[int] = SCHEDULER_CONFIG_NAME
lowercase__ : int = []
lowercase__ : Dict = True
@classmethod
def snake_case__ ( cls : str , lowercase : Dict[str, Any] = None , lowercase : Optional[str] = None , lowercase : Any=False , **lowercase : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = cls.load_config(
pretrained_model_name_or_path=lowercase , subfolder=lowercase , return_unused_kwargs=lowercase , return_commit_hash=lowercase , **lowercase , )
return cls.from_config(lowercase , return_unused_kwargs=lowercase , **lowercase )
def snake_case__ ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = False , **lowercase : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.save_config(save_directory=lowercase , push_to_hub=lowercase , **lowercase )
@property
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def snake_case__ ( cls : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = list(set([cls.__name__] + cls._compatibles ) )
__lowercase = importlib.import_module(__name__.split(""".""" )[0] )
__lowercase = [
getattr(lowercase , lowercase ) for c in compatible_classes_str if hasattr(lowercase , lowercase )
]
return compatible_classes
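# Usage sketch (the scheduler class and repo id below are illustrative; in
# diffusers the mixin above backs concrete schedulers such as DDPMScheduler):
# scheduler = DDPMScheduler.from_pretrained("some/repo", subfolder="scheduler")
# scheduler.save_pretrained("./my-scheduler")   # writes scheduler_config.json
# scheduler.compatibles                         # interchangeable scheduler classes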
| 634 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 634 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """sew"""
def __init__( self : List[Any] , lowercase : int=32 , lowercase : List[str]=768 , lowercase : Dict=12 , lowercase : str=12 , lowercase : str=3_072 , lowercase : Optional[int]=2 , lowercase : List[str]="gelu" , lowercase : List[str]=0.1 , lowercase : Tuple=0.1 , lowercase : Dict=0.1 , lowercase : Any=0.0 , lowercase : Dict=0.1 , lowercase : Optional[int]=0.1 , lowercase : List[str]=0.02 , lowercase : Dict=1E-5 , lowercase : Tuple="group" , lowercase : int="gelu" , lowercase : Any=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase : List[str]=False , lowercase : Tuple=128 , lowercase : int=16 , lowercase : Union[str, Any]=True , lowercase : List[str]=0.05 , lowercase : Optional[int]=10 , lowercase : Any=2 , lowercase : Optional[Any]=0.0 , lowercase : Optional[Any]=10 , lowercase : int=0 , lowercase : Optional[int]="mean" , lowercase : List[Any]=False , lowercase : str=False , lowercase : int=256 , lowercase : str=0 , lowercase : List[Any]=1 , lowercase : List[Any]=2 , **lowercase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = conv_bias
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = squeeze_factor
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
__lowercase = mask_feature_min_masks
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# sequence classification
__lowercase = use_weighted_layer_sum
__lowercase = classifier_proj_size
@property
def snake_case__ ( self : Dict ) -> str:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
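# Quick check (illustration only; the real class in transformers is SEWConfig,
# obfuscated above): with the default conv_stride of
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the property above evaluates to
# functools.reduce(operator.mul, conv_stride, 1) == 320, i.e. the overall
# downsampling factor of the feature extractor.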
| 634 | 1 |
import random
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> tuple:
__lowercase , __lowercase , __lowercase = [], [], []
for element in data:
if element < pivot:
less.append(lowercase__ )
elif element > pivot:
greater.append(lowercase__ )
else:
equal.append(lowercase__ )
return less, equal, greater
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Dict:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(lowercase__ ) or index < 0:
return None
__lowercase = items[random.randint(0 , len(lowercase__ ) - 1 )]
__lowercase = 0
__lowercase , __lowercase , __lowercase = _partition(lowercase__ , lowercase__ )
__lowercase = len(lowercase__ )
__lowercase = len(lowercase__ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(lowercase__ , lowercase__ )
# must be in larger
else:
return quick_select(lowercase__ , index - (m + count) )
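# Readable sketch of the same quickselect idea (illustrative only; the helpers
# above keep their obfuscated names): pick a random pivot, partition, and recurse
# into the side that contains the requested order statistic.
def quick_select_sketch(items, index):
    pivot = random.choice(items)
    less = [x for x in items if x < pivot]
    equal = [x for x in items if x == pivot]
    if index < len(less):
        return quick_select_sketch(less, index)
    if index < len(less) + len(equal):
        return pivot
    greater = [x for x in items if x > pivot]
    return quick_select_sketch(greater, index - len(less) - len(equal))
# quick_select_sketch([2, 4, 5, 7, 899, 54, 32], 3) -> 7 (the median)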
| 634 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = WavaVecaPhonemeCTCTokenizer
lowercase__ : Optional[int] = False
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
super().setUp()
__lowercase = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
__lowercase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__lowercase = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
def snake_case__ ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[str]=False , lowercase : List[str]=20 , lowercase : str=5 ) -> Tuple[str, list]:
"""simple docstring"""
__lowercase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )) for i in range(len(lowercase ) )]
__lowercase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
__lowercase = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
__lowercase = toks + toks
# toks_str = [t[1] for t in toks]
__lowercase = [t[0] for t in toks]
# Ensure consistency
__lowercase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
__lowercase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
__lowercase = """ """ + output_txt
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def snake_case__ ( self : Tuple , **lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
__lowercase = tokenizer("""m xxx ɪ""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
__lowercase = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__lowercase = tokenizer("""maɪ c""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [3, 200] ) # mai should be <unk> (=3)
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def snake_case__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase )
__lowercase = tokenizer.batch_decode(lowercase , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowercase )
__lowercase = """Hello how are you"""
__lowercase = tokenizer(lowercase , phonemizer_lang="""en-us""" ).input_ids
__lowercase = tokenizer(lowercase , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowercase , lowercase )
__lowercase = tokenizer.decode(lowercase )
__lowercase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowercase , """ɛ l o h aʊ a ʁ j u""" )
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how Are you"""
__lowercase = """hello how are you"""
__lowercase = tokenizer(lowercase ).input_ids
__lowercase = tokenizer(lowercase ).input_ids
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def snake_case__ ( lowercase : List[str] , lowercase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
__lowercase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__lowercase = tokenizer.decode(lowercase , output_char_offsets=lowercase , filter_word_delimiter_token=lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowercase , lowercase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowercase : List[str] , lowercase : Dict ):
self.assertTrue(isinstance(lowercase , lowercase ) )
self.assertTrue(isinstance(outputs_list[0] , lowercase ) )
# transform list to ModelOutput
__lowercase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowercase : List[Any] , lowercase : Optional[int] ):
if isinstance(lowercase , lowercase ):
[recursive_check(lowercase , lowercase ) for la, la in zip(lowercase , lowercase )]
self.assertEqual(lowercase , lowercase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__lowercase = tokenizer.batch_decode(lowercase , output_char_offsets=lowercase )
__lowercase = [tokenizer.decode(lowercase , output_char_offsets=lowercase ) for ids in sample_ids]
check_list_tuples_equal(lowercase , lowercase )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def snake_case__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def snake_case__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowercase = tokenizer.add_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size + len(lowercase ) )
__lowercase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowercase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowercase = tokenizer.add_special_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size_a + len(lowercase ) )
__lowercase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
__lowercase = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(output["""text"""] , lowercase )
| 634 | 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__="dummy_doc" ) -> str:
__lowercase = {doc: key_lines}
__lowercase = {doc: sys_lines}
__lowercase = {}
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , key_doc_lines[doc] , lowercase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , sys_doc_lines[doc] , lowercase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
if remove_nested:
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
__lowercase = get_coref_infos(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowercase = {}
__lowercase = 0
__lowercase = 0
for name, metric in metrics:
__lowercase , __lowercase , __lowercase = evaluator.evaluate_documents(lowercase__ , lowercase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
__lowercase = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def UpperCAmelCase__ ( lowercase__ ) -> List[Any]:
__lowercase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
__lowercase = line.split()[5]
if not parse_col == "-":
__lowercase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case__ ( self : Tuple , lowercase : Dict , lowercase : Optional[int] , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int=False , lowercase : Dict=False ) -> str:
"""simple docstring"""
__lowercase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowercase = util.check_gold_parse_annotation(lowercase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 634 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """esm"""
def __init__( self : Any , lowercase : Optional[Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=None , lowercase : Optional[int]=768 , lowercase : str=12 , lowercase : Union[str, Any]=12 , lowercase : Dict=3_072 , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Dict=1_026 , lowercase : Tuple=0.02 , lowercase : str=1E-1_2 , lowercase : Dict="absolute" , lowercase : Optional[Any]=True , lowercase : int=None , lowercase : int=False , lowercase : List[str]=False , lowercase : Tuple=None , lowercase : Tuple=None , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = emb_layer_norm_before
__lowercase = token_dropout
__lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowercase = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
__lowercase = EsmFoldConfig(**lowercase )
__lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowercase = get_default_vocab_list()
else:
__lowercase = vocab_list
else:
__lowercase = None
__lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
__lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : str = None
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : float = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : int = 128
lowercase__ : "TrunkConfig" = None
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowercase = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
__lowercase = TrunkConfig(**self.trunk )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.trunk.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 48
lowercase__ : int = 1_024
lowercase__ : int = 128
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : float = 0
lowercase__ : float = 0
lowercase__ : bool = False
lowercase__ : int = 4
lowercase__ : Optional[int] = 128
lowercase__ : "StructureModuleConfig" = None
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.structure_module is None:
__lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
__lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
__lowercase = self.sequence_state_dim // self.sequence_head_width
__lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
        if self.dropout >= 0.4:
            raise ValueError(F"`dropout` should be smaller than 0.4, got {self.dropout}." )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.structure_module.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 384
lowercase__ : int = 128
lowercase__ : int = 16
lowercase__ : int = 128
lowercase__ : int = 12
lowercase__ : int = 4
lowercase__ : int = 8
lowercase__ : float = 0.1
lowercase__ : int = 8
lowercase__ : int = 1
lowercase__ : int = 2
lowercase__ : int = 7
lowercase__ : int = 10
lowercase__ : float = 1E-8
lowercase__ : float = 1E5
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return asdict(self )
def UpperCAmelCase__ ( ) -> List[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 634 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Returns the Bernstein basis polynomials evaluated at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Returns the (x, y) point on the curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        """Plots the curve together with its control points."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 634 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Tuple = LxmertTokenizer
lowercase__ : List[str] = LxmertTokenizerFast
lowercase__ : Optional[Any] = True
lowercase__ : List[Any] = True
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case__ ( self : Optional[int] , lowercase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = """UNwant\u00E9d,running"""
__lowercase = """unwanted, running"""
return input_text, output_text
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [7, 4, 5, 10, 8, 9] )
def snake_case__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = tokenizer.tokenize(lowercase )
__lowercase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase )
__lowercase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
| 634 | 1 |
UpperCamelCase__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def UpperCAmelCase__ ( lowercase__ ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(lowercase__ , lowercase__ ):
__lowercase = F"a bytes-like object is required, not '{data.__class__.__name__}'"
raise TypeError(lowercase__ )
__lowercase = """""".join(bin(lowercase__ )[2:].zfill(8 ) for byte in data )
__lowercase = len(lowercase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__lowercase = B"""=""" * ((6 - len(lowercase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowercase__ ) % 6)
else:
__lowercase = B""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowercase__ ) , 6 ) ).encode()
+ padding
)
def UpperCAmelCase__ ( lowercase__ ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(lowercase__ , lowercase__ ) and not isinstance(lowercase__ , lowercase__ ):
__lowercase = (
"""argument should be a bytes-like object or ASCII string, """
F"not '{encoded_data.__class__.__name__}'"
)
raise TypeError(lowercase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowercase__ , lowercase__ ):
try:
__lowercase = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
__lowercase = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowercase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__lowercase = encoded_data[:-padding]
__lowercase = """""".join(
bin(B64_CHARSET.index(lowercase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__lowercase = """""".join(
bin(B64_CHARSET.index(lowercase__ ) )[2:].zfill(6 ) for char in encoded_data )
__lowercase = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowercase__ ) , 8 )
]
return bytes(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
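    # Illustrative round-trip using the functions defined above (added for clarity;
    # not part of the original module). "SGVsbG8=" is the standard Base64 encoding of b"Hello".
    assert base64_encode(b"Hello") == b"SGVsbG8="
    assert base64_decode(b"SGVsbG8=") == b"Hello"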
| 634 |
def UpperCAmelCase__(arr, required_sum) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
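    # Example (added for illustration): the subset {4, 5} of [3, 34, 4, 12, 5, 2]
    # sums to 9, so this prints True.
    print(UpperCAmelCase__([3, 34, 4, 12, 5, 2], 9))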
| 634 | 1 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def UpperCAmelCase__ ( lowercase__ ) -> Tuple:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> List[Any]:
__lowercase = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
__lowercase = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" )
__lowercase = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" )
__lowercase = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" )
__lowercase = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" )
__lowercase = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" )
__lowercase = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" )
__lowercase = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" )
__lowercase = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" )
__lowercase = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" )
__lowercase = key.replace("""image_encoder.module""" , """flava.image_model""" )
__lowercase = key.replace("""text_encoder.module""" , """flava.text_model""" )
__lowercase = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" )
__lowercase = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" )
__lowercase = key.replace("""text_projection""" , """flava.text_projection""" )
__lowercase = key.replace("""image_projection""" , """flava.image_projection""" )
__lowercase = value.float()
for key, value in codebook_state_dict.items():
__lowercase = value
return upgrade
@torch.no_grad()
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None ) -> Optional[Any]:
if config_path is not None:
__lowercase = FlavaConfig.from_pretrained(lowercase__ )
else:
__lowercase = FlavaConfig()
__lowercase = FlavaForPreTraining(lowercase__ ).eval()
__lowercase = convert_dalle_checkpoint(lowercase__ , lowercase__ , save_checkpoint=lowercase__ )
if os.path.exists(lowercase__ ):
__lowercase = torch.load(lowercase__ , map_location="""cpu""" )
else:
__lowercase = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" )
__lowercase = upgrade_state_dict(lowercase__ , lowercase__ )
hf_model.load_state_dict(lowercase__ )
__lowercase = hf_model.state_dict()
__lowercase = count_parameters(lowercase__ )
__lowercase = count_parameters(lowercase__ ) + count_parameters(lowercase__ )
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase__ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 634 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """yolos"""
def __init__( self : Optional[int] , lowercase : Any=768 , lowercase : Tuple=12 , lowercase : Tuple=12 , lowercase : str=3_072 , lowercase : Optional[Any]="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.02 , lowercase : Optional[Any]=1E-1_2 , lowercase : Tuple=[512, 864] , lowercase : Optional[int]=16 , lowercase : Dict=3 , lowercase : Optional[Any]=True , lowercase : Optional[int]=100 , lowercase : Optional[int]=True , lowercase : Any=False , lowercase : Any=1 , lowercase : Any=5 , lowercase : List[str]=2 , lowercase : Union[str, Any]=5 , lowercase : str=2 , lowercase : Tuple=0.1 , **lowercase : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = version.parse("""1.11""" )
@property
def snake_case__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case__ ( self : int ) -> float:
"""simple docstring"""
return 1E-4
@property
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return 12
| 634 | 1 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCAmelCase__ ( lowercase__ ) -> List[str]:
__lowercase = model.config
__lowercase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
__lowercase = MBartConfig(
is_decoder=lowercase__ , is_encoder_decoder=lowercase__ , add_cross_attention=lowercase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase__ , add_final_layer_norm=lowercase__ , )
return encoder_config, decoder_config
def UpperCAmelCase__ ( lowercase__ ) -> int:
if "encoder.model" in name:
__lowercase = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
__lowercase = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
__lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowercase = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
__lowercase = """encoder.""" + name
if "attn.proj" in name:
__lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
__lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
__lowercase = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
__lowercase = """encoder.layernorm.bias"""
return name
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Any:
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(lowercase__ )
if "qkv" in key:
__lowercase = key.split(""".""" )
__lowercase = int(key_split[3] )
__lowercase = int(key_split[5] )
__lowercase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[dim : dim * 2]
__lowercase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( lowercase__ , lowercase__=None , lowercase__=False ) -> List[Any]:
# load original model
__lowercase = DonutModel.from_pretrained(lowercase__ ).eval()
# load HuggingFace model
__lowercase , __lowercase = get_configs(lowercase__ )
__lowercase = DonutSwinModel(lowercase__ )
__lowercase = MBartForCausalLM(lowercase__ )
__lowercase = VisionEncoderDecoderModel(encoder=lowercase__ , decoder=lowercase__ )
model.eval()
__lowercase = original_model.state_dict()
__lowercase = convert_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ )
# verify results on scanned document
__lowercase = load_dataset("""hf-internal-testing/example-documents""" )
__lowercase = dataset["""test"""][0]["""image"""].convert("""RGB""" )
__lowercase = XLMRobertaTokenizerFast.from_pretrained(lowercase__ , from_slow=lowercase__ )
__lowercase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
__lowercase = DonutProcessor(lowercase__ , lowercase__ )
__lowercase = processor(lowercase__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__lowercase = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
__lowercase = """When is the coffee break?"""
__lowercase = task_prompt.replace("""{user_input}""" , lowercase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__lowercase = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__lowercase = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__lowercase = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__lowercase = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__lowercase = """hello world"""
else:
raise ValueError("""Model name not supported""" )
__lowercase = original_model.decoder.tokenizer(lowercase__ , add_special_tokens=lowercase__ , return_tensors="""pt""" )[
"""input_ids"""
]
__lowercase = original_model.encoder.model.patch_embed(lowercase__ )
__lowercase , __lowercase = model.encoder.embeddings(lowercase__ )
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
# verify encoder hidden states
__lowercase = original_model.encoder(lowercase__ )
__lowercase = model.encoder(lowercase__ ).last_hidden_state
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-2 )
# verify decoder hidden states
__lowercase = original_model(lowercase__ , lowercase__ , lowercase__ ).logits
__lowercase = model(lowercase__ , decoder_input_ids=lowercase__ ).logits
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
UpperCamelCase__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 634 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = IFImgaImgSuperResolutionPipeline
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
lowercase__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def snake_case__ ( self : List[str] , lowercase : Optional[int] , lowercase : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(lowercase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(lowercase )
else:
__lowercase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self : Dict ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self._test_save_load_local()
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 634 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = (EulerDiscreteScheduler,)
lowercase__ : Any = 10
def snake_case__ ( self : Union[str, Any] , **lowercase : str ) -> Optional[int]:
"""simple docstring"""
        config = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**lowercase )
return config
def snake_case__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def snake_case__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(lowercase )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type="""v_prediction""" )
__lowercase = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(lowercase )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowercase = sample.to(lowercase )
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase , use_karras_sigmas=lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowercase = sample.to(lowercase )
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(lowercase , lowercase )
__lowercase = model(lowercase , lowercase )
__lowercase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowercase ) )
__lowercase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
| 634 |
def UpperCAmelCase__ ( string_a , string_b ) -> int:
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for chara, charb in zip(string_a , string_b ):
        if chara != charb:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
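    # Quick usage sketch for the function above (inputs are illustrative): "karolin" and
    # "kathrin" differ in exactly three positions, so their Hamming distance is 3.
    print(UpperCAmelCase__("karolin", "kathrin"))  # 3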
| 634 | 1 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def UpperCAmelCase__ ( lowercase__ = "laptop" ) -> DataFrame:
__lowercase = F"https://www.amazon.in/laptop/s?k={product}"
__lowercase = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
__lowercase = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text )
# Initialize a Pandas dataframe with the column titles
__lowercase = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
__lowercase = item.ha.text
__lowercase = """https://www.amazon.in/""" + item.ha.a["""href"""]
__lowercase = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
__lowercase = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
__lowercase = """Not available"""
try:
__lowercase = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
__lowercase = """"""
try:
__lowercase = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 100 )
except ValueError:
__lowercase = float("""nan""" )
except AttributeError:
pass
__lowercase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__lowercase = """ """
__lowercase = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase__ = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 634 |
from __future__ import annotations
from collections.abc import Callable
UpperCamelCase__ = list[list[float | int]]
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Matrix:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(size + 1 )] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for row in range(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = matrix[row][col]
__lowercase = vector[row][0]
__lowercase = 0
__lowercase = 0
while row < size and col < size:
# pivoting
__lowercase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase__ , lowercase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__lowercase , __lowercase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase__ ):
__lowercase = augmented[rowa][col] / augmented[row][col]
__lowercase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase__ ):
for row in range(lowercase__ ):
__lowercase = augmented[row][col] / augmented[col][col]
for cola in range(lowercase__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase__ )
]
def UpperCAmelCase__ ( lowercase__ ) -> Callable[[int], int]:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
__lowercase = [[0] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for x_val, y_val in enumerate(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = (x_val + 1) ** (size - col - 1)
__lowercase = y_val
__lowercase = solve(lowercase__ , lowercase__ )
def interpolated_func(lowercase__ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase__ ) )
return interpolated_func
def UpperCAmelCase__ ( lowercase__ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCAmelCase__ ( lowercase__ = question_function , lowercase__ = 10 ) -> int:
__lowercase = [func(lowercase__ ) for x_val in range(1 , order + 1 )]
__lowercase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__lowercase = 0
__lowercase = 42
__lowercase = 42
for poly in polynomials:
__lowercase = 1
while func(lowercase__ ) == poly(lowercase__ ):
x_val += 1
ret += poly(lowercase__ )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
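    # Independent sanity check of the interpolation step (it does not call the code above
    # and assumes numpy is available): fit the degree-(k - 1) polynomial through the first
    # k values of u(n) = 1 - n + n^2 - ... + n^10 and watch it disagree at n = k + 1.
    import numpy as np
    def u(n):
        return sum((-n) ** p for p in range(11))
    k = 3
    xs = np.arange(1, k + 1)
    ys = np.array([u(x) for x in xs], dtype=float)
    coeffs = np.polyfit(xs, ys, deg=k - 1)            # degree-(k-1) interpolating polynomial
    first_incorrect_term = int(round(np.polyval(coeffs, k + 1)))
    print(first_incorrect_term, u(k + 1))             # 130813 vs 838861: the fit breaks at n = 4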
| 634 | 1 |
def UpperCAmelCase__ ( string_a , string_b ) -> int:
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for chara, charb in zip(string_a , string_b ):
        if chara != charb:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 1 |
import numpy
# List of input, output pairs
UpperCamelCase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCamelCase__ = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
UpperCamelCase__ = [2, 4, 1, 5]
UpperCamelCase__ = len(train_data)
UpperCamelCase__ = 0.009
def UpperCAmelCase__ ( lowercase__ , lowercase__="train" ) -> Optional[int]:
return calculate_hypothesis_value(lowercase__ , lowercase__ ) - output(
lowercase__ , lowercase__ )
def UpperCAmelCase__ ( lowercase__ ) -> Union[str, Any]:
__lowercase = 0
for i in range(len(lowercase__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Dict:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCAmelCase__ ( lowercase__ , lowercase__=m ) -> Union[str, Any]:
__lowercase = 0
for i in range(lowercase__ ):
if index == -1:
summation_value += _error(lowercase__ )
else:
summation_value += _error(lowercase__ ) * train_data[i][0][index]
return summation_value
def UpperCAmelCase__ ( lowercase__ ) -> Optional[Any]:
__lowercase = summation_of_cost_derivative(lowercase__ , lowercase__ ) / m
return cost_derivative_value
def UpperCAmelCase__ ( ) -> Tuple:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__lowercase = 0.000002
__lowercase = 0
__lowercase = 0
while True:
j += 1
__lowercase = [0, 0, 0, 0]
for i in range(0 , len(lowercase__ ) ):
__lowercase = get_cost_derivative(i - 1 )
__lowercase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowercase__ , lowercase__ , atol=lowercase__ , rtol=lowercase__ , ):
break
__lowercase = temp_parameter_vector
print(("""Number of iterations:""", j) )
def UpperCAmelCase__ ( ) -> Any:
for i in range(len(lowercase__ ) ):
print(("""Actual output value:""", output(lowercase__ , """test""" )) )
print(("""Hypothesis output:""", calculate_hypothesis_value(lowercase__ , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
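    # Self-contained numpy sketch of the same batch gradient-descent idea (independent of
    # the functions above; the step size, tolerance and iteration cap are illustrative).
    import numpy as np
    X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
    y = np.array([15, 25, 41, 8, 41], dtype=float)
    X = np.hstack([np.ones((len(X), 1)), X])          # prepend a bias column
    theta = np.zeros(X.shape[1])
    for _ in range(10_000):
        grad = X.T @ (X @ theta - y) / len(y)         # mean gradient of the squared error
        new_theta = theta - 0.001 * grad
        if np.allclose(new_theta, theta, atol=2e-6, rtol=0):
            break
        theta = new_theta
    print("numpy sketch theta:", theta)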
| 634 |
import unittest
import numpy as np
def schur_complement ( mat_a , mat_b , mat_c , pseudo_inv = None , ) -> np.ndarray:
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            F"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            F"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" )
    return mat_c - mat_b.T @ a_inv @ mat_b
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Dict ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
__lowercase = schur_complement(lowercase , lowercase , lowercase )
__lowercase = np.block([[a, b], [b.T, c]] )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
__lowercase = np.linalg.det(lowercase )
self.assertAlmostEqual(lowercase , det_a * det_s )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
def snake_case__ ( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase ):
schur_complement(lowercase , lowercase , lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 634 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 |
import random
def UpperCAmelCase__ ( vertices_number , probability , directed = False ) -> dict:
    graph = {i: [] for i in range(vertices_number )}
    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes, add an edge from i to j whenever the randomly
    # generated number is smaller than the given probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add the reverse edge from j to i as well
                    graph[j].append(i )
    return graph
def complete_graph ( vertices_number ) -> dict:
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
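    # Usage sketch (illustrative values): seed the module-level RNG for reproducibility,
    # then build a small random undirected graph and a complete graph on 3 vertices.
    random.seed(1)
    print(UpperCAmelCase__(4, 0.5))  # adjacency lists, e.g. {0: [...], 1: [...], 2: [...], 3: [...]}
    print(UpperCAmelCase__(3, 1))    # probability >= 1 falls back to complete_graph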
| 634 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def snake_case__ ( lowercase : ArgumentParser ) -> str:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def snake_case__ ( self : Dict ) -> Dict:
"""simple docstring"""
raise NotImplementedError()
| 634 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
UpperCamelCase__ = random.Random()
def UpperCAmelCase__ ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ) -> str:
if rng is None:
__lowercase = global_rng
__lowercase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowercase : Tuple , lowercase : Union[str, Any]=7 , lowercase : List[Any]=400 , lowercase : Any=2_000 , lowercase : Optional[int]=24 , lowercase : Any=24 , lowercase : List[str]=0.0 , lowercase : Dict=16_000 , lowercase : Union[str, Any]=True , lowercase : Dict=True , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = min_seq_length
__lowercase = max_seq_length
__lowercase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowercase = feature_size
__lowercase = num_mel_bins
__lowercase = padding_value
__lowercase = sampling_rate
__lowercase = return_attention_mask
__lowercase = do_normalize
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self : List[str] , lowercase : Tuple=False , lowercase : int=False ) -> Optional[Any]:
"""simple docstring"""
def _flatten(lowercase : Optional[Any] ):
return list(itertools.chain(*lowercase ) )
if equal_length:
__lowercase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowercase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowercase = [np.asarray(lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = SpeechaTextFeatureExtractor if is_speech_available() else None
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = SpeechaTextFeatureExtractionTester(self )
def snake_case__ ( self : Tuple , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowercase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0 ) - 1 ) < 1E-3 ) )
def snake_case__ ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = [np.asarray(lowercase ) for speech_input in speech_inputs]
# Test feature size
__lowercase = feature_extractor(lowercase , padding=lowercase , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__lowercase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test batched
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowercase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowercase = np.asarray(lowercase )
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
__lowercase = feature_extractor(lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , padding=lowercase , max_length=lowercase , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase = [None, 16, None]
for max_length, padding in zip(lowercase , lowercase ):
__lowercase = feature_extractor(
lowercase , max_length=lowercase , padding=lowercase , return_tensors="""np""" , return_attention_mask=lowercase )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = [np.sum(lowercase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def snake_case__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""max_length""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=4 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
__lowercase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowercase = feature_extractor(
lowercase , padding="""longest""" , max_length=16 , truncation=lowercase , return_tensors="""np""" , return_attention_mask=lowercase , )
__lowercase = inputs.input_features
__lowercase = inputs.attention_mask
__lowercase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
import torch
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = np.random.rand(100 , 32 ).astype(np.floataa )
__lowercase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowercase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self : Optional[int] , lowercase : Union[str, Any] ) -> int:
"""simple docstring"""
from datasets import load_dataset
__lowercase = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__lowercase = ds.sort("""id""" ).select(range(lowercase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
__lowercase = self._load_datasamples(1 )
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = feature_extractor(lowercase , return_tensors="""pt""" ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase , atol=1E-4 ) )
| 634 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : List[Any] = CanineTokenizer
lowercase__ : str = False
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
super().setUp()
__lowercase = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def snake_case__ ( self : List[Any] , **lowercase : Any ) -> CanineTokenizer:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
__lowercase = 1_024
return tokenizer
@require_torch
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.canine_tokenizer
__lowercase = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__lowercase = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0]
# fmt: on
__lowercase = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" )
self.assertIsInstance(lowercase , lowercase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.canine_tokenizer
__lowercase = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
__lowercase = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , lowercase )
self.assertIn("""attention_mask""" , lowercase )
self.assertIn("""token_type_ids""" , lowercase )
@require_torch
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.canine_tokenizer
__lowercase = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__lowercase = tokenizer(
text_target=lowercase , max_length=32 , padding="""max_length""" , truncation=lowercase , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def snake_case__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = """ He is very happy, UNwant\u00E9d,running"""
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
__lowercase = tokenizer.__class__.from_pretrained(lowercase )
__lowercase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
__lowercase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = """ He is very happy, UNwant\u00E9d,running"""
__lowercase = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__lowercase = chr(0Xe007 )
additional_special_tokens.append(lowercase )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
__lowercase = tokenizer.__class__.from_pretrained(lowercase )
__lowercase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn(lowercase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowercase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase , __lowercase = self.get_clean_sequence(lowercase )
# a special token for Canine can be defined as follows:
__lowercase = 0Xe005
__lowercase = chr(lowercase )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertEqual(len(lowercase ) , 1 )
__lowercase = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertEqual(lowercase , input_encoded + special_token_id )
__lowercase = tokenizer.decode(lowercase , skip_special_tokens=lowercase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = chr(0Xe005 )
__lowercase = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowercase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__lowercase = tokenizer.tokenize(lowercase )
__lowercase = tokenizer.tokenize(lowercase )
self.assertEqual(len(lowercase ) , 1 )
self.assertEqual(len(lowercase ) , 1 )
self.assertEqual(token_a[0] , lowercase )
self.assertEqual(token_a[0] , lowercase )
@require_tokenizers
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
__lowercase = 0Xe006
__lowercase = chr(lowercase )
__lowercase = AddedToken(lowercase , lstrip=lowercase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowercase )
tokenizer.from_pretrained(lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__lowercase = json.load(lowercase )
with open(os.path.join(lowercase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__lowercase = json.load(lowercase )
# a special token for Canine can be defined as follows:
__lowercase = 0Xe006
__lowercase = chr(lowercase )
__lowercase = [new_token_a]
__lowercase = [new_token_a]
with open(os.path.join(lowercase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowercase = tokenizer_class.from_pretrained(lowercase , extra_ids=0 )
self.assertIn(lowercase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__lowercase = 0Xe007
__lowercase = chr(lowercase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowercase = [AddedToken(lowercase , lstrip=lowercase )]
__lowercase = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , extra_ids=0 )
self.assertIn(lowercase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = """hello world"""
if self.space_between_special_tokens:
__lowercase = """[CLS] hello world [SEP]"""
else:
__lowercase = input
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = tokenizer.decode(lowercase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowercase , [output, output.lower()] )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__lowercase = """a"""
__lowercase = ord(lowercase )
for attr in attributes_list:
setattr(lowercase , attr + """_id""" , lowercase )
self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
self.assertEqual(getattr(lowercase , attr + """_id""" ) , lowercase )
setattr(lowercase , attr + """_id""" , lowercase )
self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
self.assertEqual(getattr(lowercase , attr + """_id""" ) , lowercase )
setattr(lowercase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens_ids""" ) , [] )
__lowercase = 0Xe006
__lowercase = chr(lowercase )
setattr(lowercase , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
pass
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Dict:
"""simple docstring"""
pass
def snake_case__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
def snake_case__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
pass
| 634 |
def sum_of_series ( first_term , common_diff , num_of_terms ) -> float:
    # closed form for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
def UpperCAmelCase__ ( ) -> None:
    print(sum_of_series(1 , 1 , 10 ) )  # 1 + 2 + ... + 10 = 55.0
if __name__ == "__main__":
import doctest
doctest.testmod()
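    # Quick check of the closed form with illustrative values: the series 2, 5, 8, 11 has
    # first_term=2, common_diff=3 and num_of_terms=4, so the sum is (4 / 2) * (4 + 9) = 26.0.
    print(sum_of_series(2, 3, 4))  # 26.0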
| 634 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 |
def solution ( n = 100 ) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
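    # Worked check for n = 10: 1^2 + ... + 10^2 = 385 and (1 + ... + 10)^2 = 55^2 = 3025,
    # so the difference is 3025 - 385 = 2640.
    print(solution(10))  # 2640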
| 634 | 1 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any , lowercase : List[Any] , lowercase : Any=13 , lowercase : List[Any]=[30, 30] , lowercase : Tuple=2 , lowercase : List[Any]=3 , lowercase : Union[str, Any]=True , lowercase : List[str]=True , lowercase : int=32 , lowercase : str=5 , lowercase : Any=4 , lowercase : str=37 , lowercase : Any="gelu" , lowercase : Any=0.1 , lowercase : Union[str, Any]=0.1 , lowercase : int=10 , lowercase : Tuple=0.02 , lowercase : List[Any]=3 , lowercase : int=None , lowercase : Dict=8 , lowercase : Any=10 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = scope
__lowercase = n_targets
__lowercase = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__lowercase = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__lowercase = num_patches + 1 + self.num_detection_tokens
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
__lowercase = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__lowercase = []
for i in range(self.batch_size ):
__lowercase = {}
__lowercase = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=lowercase )
__lowercase = torch.rand(self.n_targets , 4 , device=lowercase )
labels.append(lowercase )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case__ ( self : Union[str, Any] , lowercase : Tuple , lowercase : List[str] , lowercase : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = YolosModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case__ ( self : Any , lowercase : Tuple , lowercase : Tuple , lowercase : str ) -> str:
"""simple docstring"""
__lowercase = YolosForObjectDetection(lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(pixel_values=lowercase )
__lowercase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
__lowercase = model(pixel_values=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowercase__ : Union[str, Any] = (
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
lowercase__ : Optional[Any] = False
lowercase__ : int = False
lowercase__ : List[Any] = False
lowercase__ : str = False
def snake_case__ ( self : str , lowercase : Dict , lowercase : int , lowercase : Optional[Any]=False ) -> Any:
"""simple docstring"""
__lowercase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__lowercase = []
for i in range(self.model_tester.batch_size ):
__lowercase = {}
__lowercase = torch.ones(
size=(self.model_tester.n_targets,) , device=lowercase , dtype=torch.long )
__lowercase = torch.ones(
self.model_tester.n_targets , 4 , device=lowercase , dtype=torch.float )
labels.append(lowercase )
__lowercase = labels
return inputs_dict
def snake_case__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = YolosModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
# in YOLOS, the seq_len is different
__lowercase = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__lowercase = len(lowercase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = 1
self.assertEqual(out_len + added_hidden_states , len(lowercase ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(lowercase : Optional[Any] , lowercase : List[str] , lowercase : Union[str, Any] ):
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# YOLOS has a different seq_length
__lowercase = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*lowercase )
@slow
def snake_case__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = YolosModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCAmelCase__ ( ) -> Union[str, Any]:
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : str ) -> str:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(lowercase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
__lowercase = model(inputs.pixel_values )
# verify outputs
__lowercase = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , lowercase )
__lowercase = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=lowercase , )
__lowercase = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , lowercase , atol=1E-4 ) )
# verify postprocessing
__lowercase = image_processor.post_process_object_detection(
lowercase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
__lowercase = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(lowercase )
__lowercase = [75, 75, 17, 63, 17]
__lowercase = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(lowercase )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , lowercase , atol=1E-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , lowercase )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , lowercase ) )
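# Illustrative follow-up (not part of the original test): the dict returned by
# `post_process_object_detection` above exposes "scores", "labels" and "boxes",
# so the detections verified in the assertions could be printed like this:
#
#     for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
#         print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())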
| 634 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__="dummy_doc" ) -> str:
__lowercase = {doc: key_lines}
__lowercase = {doc: sys_lines}
__lowercase = {}
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , key_doc_lines[doc] , lowercase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , sys_doc_lines[doc] , lowercase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
if remove_nested:
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
__lowercase = get_coref_infos(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowercase = {}
__lowercase = 0
__lowercase = 0
for name, metric in metrics:
__lowercase , __lowercase , __lowercase = evaluator.evaluate_documents(lowercase__ , lowercase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
__lowercase = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
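# Worked example for the score computed above (illustrative numbers only): with
# muc/bcub/ceafe F1 values of 0.70, 0.60 and 0.65, `conll` accumulates to 1.95 and
# the reported conll_score is (1.95 / 3) * 100 = 65.0.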
def UpperCAmelCase__ ( lowercase__ ) -> List[Any]:
__lowercase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
__lowercase = line.split()[5]
if not parse_col == "-":
__lowercase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case__ ( self : Tuple , lowercase : Dict , lowercase : Optional[int] , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int=False , lowercase : Dict=False ) -> str:
"""simple docstring"""
__lowercase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowercase = util.check_gold_parse_annotation(lowercase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 634 | 1 |
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using the map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
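# Quick sanity check of the log-based comparison above (illustrative, no stdin needed):
# res(2, 10) = 10 * log10(2) ≈ 3.0103 and res(10, 3) = 3 * log10(10) = 3.0,
# so 2^10 (= 1024) is correctly reported as larger than 10^3 (= 1000).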
| 634 |
UpperCamelCase__ = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
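# Example conversions based on the table above (values can be checked by hand):
#
#     energy_conversion("kilowatthour", "joule", 1)           # 1 * 3_600_000 / 1.0 -> 3600000.0
#     energy_conversion("joule", "kilowatthour", 3_600_000)   # 3_600_000 * 1.0 / 3_600_000 -> 1.0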
| 634 | 1 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__ ( lowercase__ ) -> Optional[int]:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(lowercase__ ):
return ext
raise Exception(
F"Unable to determine file format from file extension {path}. "
F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def UpperCAmelCase__ ( lowercase__ ) -> Optional[int]:
__lowercase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
__lowercase = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
__lowercase = PipelineDataFormat.from_str(
format=lowercase__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(lowercase__ , lowercase__ )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Dict , lowercase : Pipeline , lowercase : PipelineDataFormat ) -> Optional[Any]:
"""simple docstring"""
__lowercase = nlp
__lowercase = reader
@staticmethod
def snake_case__ ( lowercase : ArgumentParser ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=lowercase , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=lowercase , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=lowercase , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=lowercase , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=lowercase , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=lowercase , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=lowercase , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=lowercase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=lowercase )
def snake_case__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self._nlp, []
for entry in self._reader:
__lowercase = nlp(**lowercase ) if self._reader.is_multi_columns else nlp(lowercase )
if isinstance(lowercase , lowercase ):
outputs.append(lowercase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
__lowercase = self._reader.save_binary(lowercase )
logger.warning(F"Current pipeline requires output to be in binary format, saving at {binary_path}" )
else:
self._reader.save(lowercase )
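# Illustrative command line for the "run" sub-command registered above (model name,
# file names and column are placeholders, not taken from this file); the input format
# is inferred from the .csv extension by try_infer_format_from_ext:
#
#     transformers-cli run --task text-classification --model some-finetuned-model \
#         --input reviews.csv --output predictions.csv --column text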
| 634 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : str = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowercase__ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
lowercase__ : ClassVar[Features] = Features({"""summary""": Value("""string""" )} )
lowercase__ : str = "text"
lowercase__ : str = "summary"
@property
def snake_case__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
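# With the default column names above, `column_mapping` resolves to
# {"text": "text", "summary": "summary"}; a dataset that stores its fields under, say,
# "article"/"highlights" (hypothetical names) would instead yield
# {"article": "text", "highlights": "summary"} after overriding text_column and summary_column.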
| 634 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , lowercase : Tuple , ) -> Optional[int]:
"""simple docstring"""
__lowercase = parent
__lowercase = 13
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = 2
__lowercase = 99
__lowercase = 0
__lowercase = 32
__lowercase = 2
__lowercase = 4
__lowercase = 0.1
__lowercase = 0.1
__lowercase = 512
__lowercase = 16
__lowercase = 2
__lowercase = 0.02
__lowercase = 3
__lowercase = 4
__lowercase = """last"""
__lowercase = True
__lowercase = None
__lowercase = 0
def snake_case__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__lowercase = None
if self.use_input_lengths:
__lowercase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case__ ( self : Dict , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : int , lowercase : List[Any] , lowercase : List[str] , lowercase : Optional[int] , lowercase : Tuple , ) -> Dict:
"""simple docstring"""
__lowercase = TFFlaubertModel(config=lowercase )
__lowercase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
__lowercase = model(lowercase )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : List[str] , lowercase : Dict , lowercase : Optional[Any] , lowercase : Any , lowercase : str , lowercase : List[str] , lowercase : str , ) -> Dict:
"""simple docstring"""
__lowercase = TFFlaubertWithLMHeadModel(lowercase )
__lowercase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
__lowercase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : List[Any] , lowercase : List[str] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : str , lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : int , lowercase : List[Any] , ) -> Dict:
"""simple docstring"""
__lowercase = TFFlaubertForQuestionAnsweringSimple(lowercase )
__lowercase = {"""input_ids""": input_ids, """lengths""": input_lengths}
__lowercase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : int , lowercase : Any , lowercase : int , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple , lowercase : str , lowercase : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFFlaubertForSequenceClassification(lowercase )
__lowercase = {"""input_ids""": input_ids, """lengths""": input_lengths}
__lowercase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case__ ( self : Any , lowercase : Dict , lowercase : Union[str, Any] , lowercase : str , lowercase : List[str] , lowercase : Any , lowercase : Any , lowercase : int , lowercase : str , lowercase : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFFlaubertForTokenClassification(config=lowercase )
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , lowercase : Optional[Any] , lowercase : str , lowercase : Dict , lowercase : str , lowercase : int , lowercase : int , lowercase : Any , lowercase : Tuple , lowercase : List[str] , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = TFFlaubertForMultipleChoice(config=lowercase )
__lowercase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__lowercase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
        return config, inputs_dict
@require_tf
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Dict = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase__ : List[str] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase__ : Any = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__ : str = False
lowercase__ : Dict = False
def snake_case__ ( self : List[Any] , lowercase : Any , lowercase : Dict , lowercase : Dict , lowercase : Tuple , lowercase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFFlaubertModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , emb_dim=37 )
def snake_case__ ( self : Optional[int] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase )
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase )
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase )
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowercase )
def snake_case__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowercase )
@slow
def snake_case__ ( self : str ) -> Tuple:
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFFlaubertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
__lowercase = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__lowercase = model(lowercase )[0]
__lowercase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , lowercase )
# compare the actual values for a slice.
__lowercase = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 634 |
def find_min(arr):
    """Return the minimum possible difference between the sums of two subsets of arr."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # sum j is reachable with the first i items if it was already reachable
            # without item i, or if item i closes the remaining gap
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = s
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
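# Worked example (illustrative): find_min([1, 6, 11, 5]) returns 1, because the best
# split is {1, 6, 5} = 12 versus {11} = 11, whose sums differ by exactly 1.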
| 634 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase , """tf_padding""" ) )
self.parent.assertTrue(hasattr(lowercase , """depth_multiplier""" ) )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : Optional[int] , lowercase : Union[str, Any]=13 , lowercase : Any=3 , lowercase : Any=32 , lowercase : int=0.25 , lowercase : Dict=8 , lowercase : Dict=True , lowercase : Dict=1_024 , lowercase : Dict=32 , lowercase : Tuple="relu6" , lowercase : Optional[Any]=0.1 , lowercase : Any=0.02 , lowercase : str=True , lowercase : List[Any]=True , lowercase : List[str]=10 , lowercase : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = depth_multiplier
__lowercase = min_depth
__lowercase = tf_padding
__lowercase = int(last_hidden_size * depth_multiplier )
__lowercase = output_stride
__lowercase = hidden_act
__lowercase = classifier_dropout_prob
__lowercase = use_labels
__lowercase = is_training
__lowercase = num_labels
__lowercase = initializer_range
__lowercase = scope
def snake_case__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def snake_case__ ( self : List[str] , lowercase : Optional[Any] , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] ) -> Any:
"""simple docstring"""
__lowercase = MobileNetVaModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case__ ( self : Tuple , lowercase : str , lowercase : str , lowercase : str , lowercase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MobileNetVaForImageClassification(lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowercase__ : Tuple = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : str = False
lowercase__ : int = False
lowercase__ : Any = False
lowercase__ : Tuple = False
def snake_case__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = MobileNetVaModelTester(self )
__lowercase = MobileNetVaConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def snake_case__ ( self : Tuple ) -> int:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def snake_case__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(lowercase : List[Any] , lowercase : Tuple , lowercase : Union[str, Any] ):
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = 26
self.assertEqual(len(lowercase ) , lowercase )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def snake_case__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def snake_case__ ( self : Dict ) -> Dict:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = MobileNetVaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : List[str] ) -> Dict:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def snake_case__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(lowercase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowercase )
# verify the logits
__lowercase = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowercase )
__lowercase = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
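# Note (informational): the (1, 1001) logits shape asserted above comes from the original
# TensorFlow MobileNetV1 head, which predicts the 1000 ImageNet classes plus one extra
# "background" class at index 0.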
| 634 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : Tuple=32 , lowercase : Optional[Any]=2 , lowercase : Tuple=3 , lowercase : Tuple=16 , lowercase : Tuple=[1, 2, 1] , lowercase : Optional[Any]=[2, 2, 4] , lowercase : Dict=2 , lowercase : Optional[int]=2.0 , lowercase : List[Any]=True , lowercase : str=0.0 , lowercase : Any=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : Tuple=False , lowercase : Optional[Any]=True , lowercase : int=0.02 , lowercase : Union[str, Any]=1E-5 , lowercase : Dict=True , lowercase : Any=None , lowercase : str=True , lowercase : str=10 , lowercase : Dict=8 , lowercase : int=["stage1", "stage2", "stage3"] , lowercase : Optional[int]=[1, 2, 3] , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case__ ( self : Any , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Any , lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowercase ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=lowercase )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase__ : List[str] = False
lowercase__ : int = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowercase : Optional[int] ):
__lowercase = 0
return t
def check_equivalence(lowercase : Optional[int] , lowercase : str , lowercase : str , lowercase : Tuple={} ):
with torch.no_grad():
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase )
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : int , lowercase : Optional[Any] ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has"
F" `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}."
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__ : Any = MaskFormerSwinConfig
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowercase )
backbone.to(lowercase )
backbone.eval()
__lowercase = backbone(**lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowercase , output_hidden_states=lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowercase , output_attentions=lowercase )
self.assertIsNotNone(outputs.attentions )
| 634 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : Tuple=32 , lowercase : Optional[Any]=2 , lowercase : Tuple=3 , lowercase : Tuple=16 , lowercase : Tuple=[1, 2, 1] , lowercase : Optional[Any]=[2, 2, 4] , lowercase : Dict=2 , lowercase : Optional[int]=2.0 , lowercase : List[Any]=True , lowercase : str=0.0 , lowercase : Any=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : Tuple=False , lowercase : Optional[Any]=True , lowercase : int=0.02 , lowercase : Union[str, Any]=1E-5 , lowercase : Dict=True , lowercase : Any=None , lowercase : str=True , lowercase : str=10 , lowercase : Dict=8 , lowercase : int=["stage1", "stage2", "stage3"] , lowercase : Optional[int]=[1, 2, 3] , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case__ ( self : Any , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Any , lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowercase ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=lowercase )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase__ : List[str] = False
lowercase__ : int = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
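# compute the padded height/width the inputs are padded to, used below to derive the expected hidden-state shapes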
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
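# verify that tuple and dict model outputs are element-wise identical (NaNs are zeroed out before the comparison)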
def set_nan_tensor_to_zero(lowercase : Optional[int] ):
__lowercase = 0
return t
def check_equivalence(lowercase : Optional[int] , lowercase : str , lowercase : str , lowercase : Tuple={} ):
with torch.no_grad():
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase )
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : int , lowercase : Optional[Any] ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has"
F" `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}."
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__ : Any = MaskFormerSwinConfig
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowercase )
backbone.to(lowercase )
backbone.eval()
__lowercase = backbone(**lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowercase , output_hidden_states=lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowercase , output_attentions=lowercase )
self.assertIsNotNone(outputs.attentions )
| 634 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCamelCase__ = "scheduler_config.json"
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[Any] = 1
lowercase__ : Tuple = 2
lowercase__ : Union[str, Any] = 3
lowercase__ : Union[str, Any] = 4
lowercase__ : str = 5
lowercase__ : Any = 6
lowercase__ : Any = 7
lowercase__ : List[str] = 8
lowercase__ : Union[str, Any] = 9
lowercase__ : int = 10
lowercase__ : List[str] = 11
lowercase__ : List[Any] = 12
lowercase__ : str = 13
lowercase__ : Optional[int] = 14
@dataclass
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : torch.FloatTensor
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : Optional[int] = SCHEDULER_CONFIG_NAME
lowercase__ : int = []
lowercase__ : Dict = True
@classmethod
def snake_case__ ( cls : str , lowercase : Dict[str, Any] = None , lowercase : Optional[str] = None , lowercase : Any=False , **lowercase : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = cls.load_config(
pretrained_model_name_or_path=lowercase , subfolder=lowercase , return_unused_kwargs=lowercase , return_commit_hash=lowercase , **lowercase , )
return cls.from_config(lowercase , return_unused_kwargs=lowercase , **lowercase )
def snake_case__ ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = False , **lowercase : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.save_config(save_directory=lowercase , push_to_hub=lowercase , **lowercase )
@property
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def snake_case__ ( cls : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = list(set([cls.__name__] + cls._compatibles ) )
__lowercase = importlib.import_module(__name__.split(""".""" )[0] )
__lowercase = [
getattr(lowercase , lowercase ) for c in compatible_classes_str if hasattr(lowercase , lowercase )
]
return compatible_classes
| 634 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : List[Any] = PegasusConfig
lowercase__ : Tuple = {}
lowercase__ : Any = """gelu"""
def __init__( self : Any , lowercase : List[str] , lowercase : List[Any]=13 , lowercase : Any=7 , lowercase : Union[str, Any]=True , lowercase : Any=False , lowercase : Optional[int]=99 , lowercase : int=32 , lowercase : str=5 , lowercase : Union[str, Any]=4 , lowercase : Optional[Any]=37 , lowercase : Optional[int]=0.1 , lowercase : int=0.1 , lowercase : Dict=20 , lowercase : str=2 , lowercase : Any=1 , lowercase : str=0 , ) -> Optional[int]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
def snake_case__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__lowercase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowercase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def snake_case__ ( self : str , lowercase : List[str] , lowercase : List[str] , lowercase : int ) -> int:
"""simple docstring"""
__lowercase = 20
__lowercase = model_class_name(lowercase )
__lowercase = model.encode(inputs_dict["""input_ids"""] )
__lowercase , __lowercase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowercase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
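# incremental (cached) decoding needs a full-length attention mask and explicit position ids for the decoded prefix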
__lowercase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__lowercase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowercase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__lowercase = model.decode(lowercase , lowercase )
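# the last-token logits from cached, step-by-step decoding should match a single full forward pass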
__lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
def snake_case__ ( self : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = 20
__lowercase = model_class_name(lowercase )
__lowercase = model.encode(inputs_dict["""input_ids"""] )
__lowercase , __lowercase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
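# pad the decoder attention mask with zeros out to the maximum cached decoder length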
__lowercase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowercase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__lowercase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowercase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__lowercase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , ) -> Tuple:
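# derive default encoder/decoder attention masks from the pad token when none are supplied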
if attention_mask is None:
__lowercase = np.not_equal(lowercase__ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__lowercase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase__ : List[str] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase__ : Union[str, Any] = True
lowercase__ : Union[str, Any] = False
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = FlaxPegasusModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase )
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def snake_case__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase : int , lowercase : Dict=None , **lowercase : Any ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__lowercase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowercase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = model_class(lowercase )
__lowercase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__lowercase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase : Tuple , lowercase : Tuple , lowercase : Optional[Any] ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__lowercase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowercase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__lowercase = np.ones((1, 1) )
__lowercase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__lowercase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__lowercase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__lowercase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__lowercase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=512 , padding=lowercase )
__lowercase = model.generate(**lowercase , num_beams=2 ).sequences
__lowercase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
| 634 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """sew"""
def __init__( self : List[Any] , lowercase : int=32 , lowercase : List[str]=768 , lowercase : Dict=12 , lowercase : str=12 , lowercase : str=3_072 , lowercase : Optional[int]=2 , lowercase : List[str]="gelu" , lowercase : List[str]=0.1 , lowercase : Tuple=0.1 , lowercase : Dict=0.1 , lowercase : Any=0.0 , lowercase : Dict=0.1 , lowercase : Optional[int]=0.1 , lowercase : List[str]=0.02 , lowercase : Dict=1E-5 , lowercase : Tuple="group" , lowercase : int="gelu" , lowercase : Any=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase : List[str]=False , lowercase : Tuple=128 , lowercase : int=16 , lowercase : Union[str, Any]=True , lowercase : List[str]=0.05 , lowercase : Optional[int]=10 , lowercase : Any=2 , lowercase : Optional[Any]=0.0 , lowercase : Optional[Any]=10 , lowercase : int=0 , lowercase : Optional[int]="mean" , lowercase : List[Any]=False , lowercase : str=False , lowercase : int=256 , lowercase : str=0 , lowercase : List[Any]=1 , lowercase : List[Any]=2 , **lowercase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = list(lowercase )
__lowercase = conv_bias
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = squeeze_factor
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
__lowercase = mask_feature_min_masks
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# sequence classification
__lowercase = use_weighted_layer_sum
__lowercase = classifier_proj_size
@property
def snake_case__ ( self : Dict ) -> str:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 634 | 1 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowercase : str ) -> List[str]:
"""simple docstring"""
__lowercase = data
__lowercase = [0X6745_2301, 0Xefcd_ab89, 0X98ba_dcfe, 0X1032_5476, 0Xc3d2_e1f0]
@staticmethod
def snake_case__ ( lowercase : Optional[int] , lowercase : Any ) -> Dict:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0Xffff_ffff
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = b"""\x80""" + b"""\x00""" * (63 - (len(self.data ) + 8) % 64)
__lowercase = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def snake_case__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def snake_case__ ( self : Dict , lowercase : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = list(struct.unpack(""">16L""" , lowercase ) ) + [0] * 64
for i in range(16 , 80 ):
__lowercase = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.padding()
__lowercase = self.split_blocks()
for block in self.blocks:
__lowercase = self.expand_block(lowercase )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.h
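# run the 80 rounds; each 20-round stage uses a different boolean function f and round constant k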
for i in range(0 , 80 ):
if 0 <= i < 20:
__lowercase = (b & c) | ((~b) & d)
__lowercase = 0X5a82_7999
elif 20 <= i < 40:
__lowercase = b ^ c ^ d
__lowercase = 0X6ed9_eba1
elif 40 <= i < 60:
__lowercase = (b & c) | (b & d) | (c & d)
__lowercase = 0X8f1b_bcdc
elif 60 <= i < 80:
__lowercase = b ^ c ^ d
__lowercase = 0Xca62_c1d6
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = (
self.rotate(lowercase , 5 ) + f + e + k + expanded_block[i] & 0Xffff_ffff,
a,
self.rotate(lowercase , 30 ),
c,
d,
)
__lowercase = (
self.h[0] + a & 0Xffff_ffff,
self.h[1] + b & 0Xffff_ffff,
self.h[2] + c & 0Xffff_ffff,
self.h[3] + d & 0Xffff_ffff,
self.h[4] + e & 0Xffff_ffff,
)
return ("{:08x}" * 5).format(*self.h )
def UpperCAmelCase__ ( ) -> Union[str, Any]:
__lowercase = B"""Test String"""
assert SHAaHash(lowercase__ ).final_hash() == hashlib.shaa(lowercase__ ).hexdigest() # noqa: S324
def UpperCAmelCase__ ( ) -> int:
__lowercase = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
__lowercase = parser.parse_args()
__lowercase = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
__lowercase = f.read()
else:
__lowercase = bytes(lowercase__ , """utf-8""" )
print(SHAaHash(lowercase__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 634 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = WavaVecaPhonemeCTCTokenizer
lowercase__ : Optional[int] = False
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
super().setUp()
__lowercase = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
__lowercase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__lowercase = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
def snake_case__ ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[str]=False , lowercase : List[str]=20 , lowercase : str=5 ) -> Tuple[str, list]:
"""simple docstring"""
__lowercase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )) for i in range(len(lowercase ) )]
__lowercase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
__lowercase = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
__lowercase = toks + toks
# toks_str = [t[1] for t in toks]
__lowercase = [t[0] for t in toks]
# Ensure consistency
__lowercase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
__lowercase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
__lowercase = """ """ + output_txt
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def snake_case__ ( self : Tuple , **lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
__lowercase = tokenizer("""m xxx ɪ""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
__lowercase = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__lowercase = tokenizer("""maɪ c""" , do_phonemize=lowercase ).input_ids
self.assertEqual(lowercase , [3, 200] ) # mai should be <unk> (=3)
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def snake_case__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids )
def snake_case__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase )
__lowercase = tokenizer.batch_decode(lowercase , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , batch_tokens[0] )
self.assertEqual(lowercase , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__lowercase = """Hello how are you"""
__lowercase = tokenizer.phonemize(lowercase , phonemizer_lang="""en-us""" )
__lowercase = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowercase )
def snake_case__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowercase )
__lowercase = """Hello how are you"""
__lowercase = tokenizer(lowercase , phonemizer_lang="""en-us""" ).input_ids
__lowercase = tokenizer(lowercase , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowercase , lowercase )
__lowercase = tokenizer.decode(lowercase )
__lowercase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowercase , """ɛ l o h aʊ a ʁ j u""" )
def snake_case__ ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__lowercase = """Hello how Are you"""
__lowercase = """hello how are you"""
__lowercase = tokenizer(lowercase ).input_ids
__lowercase = tokenizer(lowercase ).input_ids
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__lowercase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def snake_case__ ( lowercase : List[str] , lowercase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
__lowercase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__lowercase = tokenizer.decode(lowercase , output_char_offsets=lowercase , filter_word_delimiter_token=lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowercase , lowercase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowercase : List[str] , lowercase : Dict ):
self.assertTrue(isinstance(lowercase , lowercase ) )
self.assertTrue(isinstance(outputs_list[0] , lowercase ) )
# transform list to ModelOutput
__lowercase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowercase : List[Any] , lowercase : Optional[int] ):
if isinstance(lowercase , lowercase ):
[recursive_check(lowercase , lowercase ) for la, la in zip(lowercase , lowercase )]
self.assertEqual(lowercase , lowercase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
__lowercase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__lowercase = tokenizer.batch_decode(lowercase , output_char_offsets=lowercase )
__lowercase = [tokenizer.decode(lowercase , output_char_offsets=lowercase ) for ids in sample_ids]
check_list_tuples_equal(lowercase , lowercase )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def snake_case__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def snake_case__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
def snake_case__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowercase = tokenizer.add_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size + len(lowercase ) )
__lowercase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowercase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowercase = tokenizer.add_special_tokens(lowercase )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size_a + len(lowercase ) )
__lowercase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
__lowercase = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(output["""text"""] , lowercase )
| 634 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : Union[str, Any] = BlenderbotSmallConfig
lowercase__ : List[Any] = {}
lowercase__ : int = """gelu"""
def __init__( self : Optional[Any] , lowercase : List[str] , lowercase : List[Any]=13 , lowercase : List[str]=7 , lowercase : str=True , lowercase : List[Any]=False , lowercase : Optional[Any]=99 , lowercase : Optional[Any]=32 , lowercase : Any=2 , lowercase : Union[str, Any]=4 , lowercase : Tuple=37 , lowercase : Optional[int]=0.1 , lowercase : List[Any]=0.1 , lowercase : Any=20 , lowercase : Dict=2 , lowercase : Optional[int]=1 , lowercase : str=0 , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
def snake_case__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowercase = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def snake_case__ ( self : Optional[int] , lowercase : Optional[Any] , lowercase : Any ) -> Dict:
"""simple docstring"""
__lowercase = TFBlenderbotSmallModel(config=lowercase ).get_decoder()
__lowercase = inputs_dict["""input_ids"""]
__lowercase = input_ids[:1, :]
__lowercase = inputs_dict["""attention_mask"""][:1, :]
__lowercase = inputs_dict["""head_mask"""]
__lowercase = 1
# first forward pass
__lowercase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
__lowercase , __lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowercase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowercase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowercase = model(lowercase , attention_mask=lowercase )[0]
__lowercase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowercase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowercase = output_from_no_past[:, -3:, random_slice_idx]
__lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , ) -> int:
if attention_mask is None:
__lowercase = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__lowercase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : List[str] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
lowercase__ : List[str] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
lowercase__ : Optional[Any] = (
{
"""conversational""": TFBlenderbotSmallForConditionalGeneration,
"""feature-extraction""": TFBlenderbotSmallModel,
"""summarization""": TFBlenderbotSmallForConditionalGeneration,
"""text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
"""translation""": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase__ : str = True
lowercase__ : Any = False
lowercase__ : int = False
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFBlenderbotSmallModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase )
def snake_case__ ( self : int ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ : List[str] = [
"""Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
""" i'm going to throw up.\nand why is that?"""
]
lowercase__ : Optional[Any] = """facebook/blenderbot_small-90M"""
@cached_property
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
@cached_property
def snake_case__ ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def snake_case__ ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer(self.src_text , return_tensors="""tf""" )
__lowercase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
__lowercase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 634 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """esm"""
def __init__( self : Any , lowercase : Optional[Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=None , lowercase : Optional[int]=768 , lowercase : str=12 , lowercase : Union[str, Any]=12 , lowercase : Dict=3_072 , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Dict=1_026 , lowercase : Tuple=0.02 , lowercase : str=1E-1_2 , lowercase : Dict="absolute" , lowercase : Optional[Any]=True , lowercase : int=None , lowercase : int=False , lowercase : List[str]=False , lowercase : Tuple=None , lowercase : Tuple=None , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = emb_layer_norm_before
__lowercase = token_dropout
__lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowercase = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
__lowercase = EsmFoldConfig(**lowercase )
__lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowercase = get_default_vocab_list()
else:
__lowercase = vocab_list
else:
__lowercase = None
__lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
__lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : str = None
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : float = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : int = 128
lowercase__ : "TrunkConfig" = None
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowercase = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
__lowercase = TrunkConfig(**self.trunk )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.trunk.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 48
lowercase__ : int = 1_024
lowercase__ : int = 128
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : float = 0
lowercase__ : float = 0
lowercase__ : bool = False
lowercase__ : int = 4
lowercase__ : Optional[int] = 128
lowercase__ : "StructureModuleConfig" = None
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.structure_module is None:
__lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
__lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
F" {self.sequence_state_dim} and {self.sequence_state_dim}." )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
F" {self.pairwise_state_dim} and {self.pairwise_state_dim}." )
__lowercase = self.sequence_state_dim // self.sequence_head_width
__lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.structure_module.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 384
lowercase__ : int = 128
lowercase__ : int = 16
lowercase__ : int = 128
lowercase__ : int = 12
lowercase__ : int = 4
lowercase__ : int = 8
lowercase__ : float = 0.1
lowercase__ : int = 8
lowercase__ : int = 1
lowercase__ : int = 2
lowercase__ : int = 7
lowercase__ : int = 10
lowercase__ : float = 1E-8
lowercase__ : float = 1E5
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return asdict(self )
def UpperCAmelCase__ ( ) -> List[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 634 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Dict , *lowercase : List[Any] , **lowercase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*lowercase , **lowercase )
requires_backends(self , """decord""" )
self.check_model_type(lowercase )
def snake_case__ ( self : Optional[int] , lowercase : Optional[int]=None , lowercase : List[str]=None , lowercase : str=None ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {}
if frame_sampling_rate is not None:
__lowercase = frame_sampling_rate
if num_frames is not None:
__lowercase = num_frames
__lowercase = {}
if top_k is not None:
__lowercase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Tuple , lowercase : Union[str, List[str]] , **lowercase : Union[str, Any] ) -> str:
"""simple docstring"""
return super().__call__(lowercase , **lowercase )
def snake_case__ ( self : Tuple , lowercase : Union[str, Any] , lowercase : List[str]=None , lowercase : Dict=1 ) -> Any:
"""simple docstring"""
if num_frames is None:
__lowercase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowercase = BytesIO(requests.get(lowercase ).content )
__lowercase = VideoReader(lowercase )
videoreader.seek(0 )
__lowercase = 0
__lowercase = num_frames * frame_sampling_rate - 1
__lowercase = np.linspace(lowercase , lowercase , num=lowercase , dtype=np.intaa )
__lowercase = videoreader.get_batch(lowercase ).asnumpy()
__lowercase = list(lowercase )
__lowercase = self.image_processor(lowercase , return_tensors=self.framework )
return model_inputs
def snake_case__ ( self : Tuple , lowercase : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = self.model(**lowercase )
return model_outputs
def snake_case__ ( self : Optional[int] , lowercase : Optional[Any] , lowercase : Tuple=5 ) -> int:
"""simple docstring"""
if top_k > self.model.config.num_labels:
__lowercase = self.model.config.num_labels
if self.framework == "pt":
__lowercase = model_outputs.logits.softmax(-1 )[0]
__lowercase , __lowercase = probs.topk(lowercase )
else:
raise ValueError(F"Unsupported framework: {self.framework}" )
__lowercase = scores.tolist()
__lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase , lowercase )]
| 634 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Tuple = LxmertTokenizer
lowercase__ : List[str] = LxmertTokenizerFast
lowercase__ : Optional[Any] = True
lowercase__ : List[Any] = True
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case__ ( self : Optional[int] , lowercase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = """UNwant\u00E9d,running"""
__lowercase = """unwanted, running"""
return input_text, output_text
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [7, 4, 5, 10, 8, 9] )
def snake_case__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = tokenizer.tokenize(lowercase )
__lowercase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase )
__lowercase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
| 634 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ ) -> int:
__lowercase = 1.5
__lowercase = int(factor * num_class_images )
__lowercase = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=lowercase__ , aesthetic_weight=0.1 )
os.makedirs(F"{class_data_dir}/images" , exist_ok=lowercase__ )
if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images:
return
while True:
__lowercase = client.query(text=lowercase__ )
if len(lowercase__ ) >= factor * num_class_images or num_images > 1E4:
break
else:
__lowercase = int(factor * num_images )
__lowercase = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=lowercase__ , aesthetic_weight=0.1 , )
__lowercase = 0
__lowercase = 0
__lowercase = tqdm(desc="""downloading real regularization images""" , total=lowercase__ )
with open(F"{class_data_dir}/caption.txt" , """w""" ) as fa, open(F"{class_data_dir}/urls.txt" , """w""" ) as fa, open(
F"{class_data_dir}/images.txt" , """w""" ) as fa:
while total < num_class_images:
__lowercase = class_images[count]
count += 1
try:
__lowercase = requests.get(images["""url"""] )
if img.status_code == 200:
__lowercase = Image.open(BytesIO(img.content ) )
with open(F"{class_data_dir}/images/{total}.jpg" , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F"{class_data_dir}/images/{total}.jpg" + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def UpperCAmelCase__ ( ) -> Optional[int]:
__lowercase = argparse.ArgumentParser("""""" , add_help=lowercase__ )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=lowercase__ , type=lowercase__ )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=lowercase__ , type=lowercase__ )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=lowercase__ )
return parser.parse_args()
if __name__ == "__main__":
UpperCamelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 634 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> bool:
__lowercase = len(lowercase__ )
__lowercase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__lowercase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__lowercase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowercase = subset[i - 1][j]
if arr[i - 1] <= j:
__lowercase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase__ = logging.getLogger()
def UpperCAmelCase__ ( ) -> List[Any]:
__lowercase = argparse.ArgumentParser()
parser.add_argument("""-f""" )
__lowercase = parser.parse_args()
return args.f
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : Dict ) -> None:
"""simple docstring"""
__lowercase = logging.StreamHandler(sys.stdout )
logger.addHandler(lowercase )
def snake_case__ ( self : Dict , lowercase : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(lowercase , """argv""" , lowercase ):
__lowercase = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowercase , 0.666 )
@slow
@require_torch_non_multi_gpu
def snake_case__ ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(lowercase )
__lowercase = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(lowercase )
__lowercase = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(lowercase )
| 634 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """yolos"""
def __init__( self : Optional[int] , lowercase : Any=768 , lowercase : Tuple=12 , lowercase : Tuple=12 , lowercase : str=3_072 , lowercase : Optional[Any]="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.02 , lowercase : Optional[Any]=1E-1_2 , lowercase : Tuple=[512, 864] , lowercase : Optional[int]=16 , lowercase : Dict=3 , lowercase : Optional[Any]=True , lowercase : Optional[int]=100 , lowercase : Optional[int]=True , lowercase : Any=False , lowercase : Any=1 , lowercase : Any=5 , lowercase : List[str]=2 , lowercase : Union[str, Any]=5 , lowercase : str=2 , lowercase : Tuple=0.1 , **lowercase : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowercase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Dict = version.parse("""1.11""" )
@property
def snake_case__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case__ ( self : int ) -> float:
"""simple docstring"""
return 1E-4
@property
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return 12
| 634 | 1 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( lowercase__ ) -> List[str]:
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def snake_case__ ( lowercase : ArgumentParser ) -> str:
"""simple docstring"""
__lowercase = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=lowercase , default=lowercase , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=lowercase , help="""Name of the model to download""" )
download_parser.set_defaults(func=lowercase )
def __init__( self : int , lowercase : str , lowercase : str , lowercase : bool , lowercase : bool ) -> Any:
"""simple docstring"""
__lowercase = model
__lowercase = cache
__lowercase = force
__lowercase = trust_remote_code
def snake_case__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 634 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : int = IFImgaImgSuperResolutionPipeline
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
lowercase__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
def snake_case__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def snake_case__ ( self : List[str] , lowercase : Optional[int] , lowercase : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(lowercase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(lowercase )
else:
__lowercase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self : Dict ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self._test_save_load_local()
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 634 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
UpperCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : int = """rag"""
lowercase__ : int = True
def __init__( self : Optional[Any] , lowercase : Dict=None , lowercase : Optional[int]=True , lowercase : str=None , lowercase : str=None , lowercase : Dict=None , lowercase : str=None , lowercase : str=None , lowercase : Any=" / " , lowercase : int=" // " , lowercase : str=5 , lowercase : int=300 , lowercase : Any=768 , lowercase : List[str]=8 , lowercase : Any="wiki_dpr" , lowercase : Optional[Any]="train" , lowercase : Optional[Any]="compressed" , lowercase : Dict=None , lowercase : Any=None , lowercase : Optional[int]=False , lowercase : List[str]=False , lowercase : str=0.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=False , lowercase : List[Any]=False , lowercase : int=False , lowercase : List[Any]=True , lowercase : Dict=None , **lowercase : Optional[int] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=lowercase , pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , forced_eos_token_id=lowercase , is_encoder_decoder=lowercase , prefix=lowercase , vocab_size=lowercase , **lowercase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__lowercase = kwargs.pop("""question_encoder""" )
__lowercase = question_encoder_config.pop("""model_type""" )
__lowercase = kwargs.pop("""generator""" )
__lowercase = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
__lowercase = AutoConfig.for_model(lowercase , **lowercase )
__lowercase = AutoConfig.for_model(lowercase , **lowercase )
__lowercase = reduce_loss
__lowercase = label_smoothing
__lowercase = exclude_bos_score
__lowercase = do_marginalize
__lowercase = title_sep
__lowercase = doc_sep
__lowercase = n_docs
__lowercase = max_combined_length
__lowercase = dataset
__lowercase = dataset_split
__lowercase = index_name
__lowercase = retrieval_vector_size
__lowercase = retrieval_batch_size
__lowercase = passages_path
__lowercase = index_path
__lowercase = use_dummy_dataset
__lowercase = output_retrieved
__lowercase = do_deduplication
__lowercase = use_cache
if self.forced_eos_token_id is None:
__lowercase = getattr(self.generator , """forced_eos_token_id""" , lowercase )
@classmethod
def snake_case__ ( cls : Optional[Any] , lowercase : PretrainedConfig , lowercase : PretrainedConfig , **lowercase : int ) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.question_encoder.to_dict()
__lowercase = self.generator.to_dict()
__lowercase = self.__class__.model_type
return output
| 634 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 634 |
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> int:
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("""String lengths must match!""" )
__lowercase = 0
for chara, chara in zip(lowercase__ , lowercase__ ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
UpperCamelCase__ = None
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase__ = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
UpperCamelCase__ = {
"google/rembert": 2_56,
}
UpperCamelCase__ = "▁"
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[Any] = VOCAB_FILES_NAMES
lowercase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : int = RemBertTokenizer
def __init__( self : Tuple , lowercase : int=None , lowercase : str=None , lowercase : Optional[Any]=True , lowercase : Union[str, Any]=True , lowercase : Optional[int]=False , lowercase : Optional[Any]="[CLS]" , lowercase : Dict="[SEP]" , lowercase : Tuple="<unk>" , lowercase : str="[SEP]" , lowercase : str="<pad>" , lowercase : int="[CLS]" , lowercase : Tuple="[MASK]" , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
__lowercase = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , **lowercase , )
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = False if not self.vocab_file else True
def snake_case__ ( self : Tuple , lowercase : List[int] , lowercase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__ ( self : Tuple , lowercase : List[int] , lowercase : Optional[List[int]] = None , lowercase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1]
return [1] + ([0] * len(lowercase )) + [1]
def snake_case__ ( self : Tuple , lowercase : List[int] , lowercase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : int , lowercase : str , lowercase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase ):
logger.error("""Vocabulary path ({}) should be a directory""".format(lowercase ) )
return
__lowercase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
| 634 |
from __future__ import annotations
from collections.abc import Callable
UpperCamelCase__ = list[list[float | int]]
def UpperCAmelCase__ ( lowercase__ , lowercase__ ) -> Matrix:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(size + 1 )] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for row in range(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = matrix[row][col]
__lowercase = vector[row][0]
__lowercase = 0
__lowercase = 0
while row < size and col < size:
# pivoting
__lowercase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase__ , lowercase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__lowercase , __lowercase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase__ ):
__lowercase = augmented[rowa][col] / augmented[row][col]
__lowercase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase__ ):
for row in range(lowercase__ ):
__lowercase = augmented[row][col] / augmented[col][col]
for cola in range(lowercase__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase__ )
]
def UpperCAmelCase__ ( lowercase__ ) -> Callable[[int], int]:
__lowercase = len(lowercase__ )
__lowercase = [[0 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
__lowercase = [[0] for _ in range(lowercase__ )]
__lowercase = 42
__lowercase = 42
__lowercase = 42
__lowercase = 42
for x_val, y_val in enumerate(lowercase__ ):
for col in range(lowercase__ ):
__lowercase = (x_val + 1) ** (size - col - 1)
__lowercase = y_val
__lowercase = solve(lowercase__ , lowercase__ )
def interpolated_func(lowercase__ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase__ ) )
return interpolated_func
def UpperCAmelCase__ ( lowercase__ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCAmelCase__ ( lowercase__ = question_function , lowercase__ = 10 ) -> int:
__lowercase = [func(lowercase__ ) for x_val in range(1 , order + 1 )]
__lowercase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__lowercase = 0
__lowercase = 42
__lowercase = 42
for poly in polynomials:
__lowercase = 1
while func(lowercase__ ) == poly(lowercase__ ):
x_val += 1
ret += poly(lowercase__ )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 634 | 1 |