from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char
    in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is found; returns the generation
    count, the total number of evaluated strings, and the matching string."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item, target) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
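
# A minimal usage sketch (not part of the original file): driving the building
# blocks above by hand for a single generation, with a tiny made-up gene set.
def _demo_one_generation() -> None:
    genes = list("abc ")
    target = "abc abc"
    population = ["".join(random.choice(genes) for _ in range(len(target))) for _ in range(10)]
    # Score and rank the population, then breed the two best individuals.
    scored = sorted((evaluate(item, target) for item in population), key=lambda x: x[1], reverse=True)
    child_1, child_2 = crossover(scored[0][0], scored[1][0])
    print(mutate(child_1, genes), mutate(child_2, genes))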
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    """Split the dataset dict into features and target."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor and predict the target for the test features."""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    """Run the regression on the California housing dataset and print errors."""
    # California house price dataset is loaded from scikit-learn
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
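
# A hypothetical quick check (an assumption, not part of the original script):
# the same pipeline on a tiny synthetic regression problem, to show the shapes
# the helpers expect and return.
def _demo_shapes() -> None:
    rng = np.random.default_rng(0)
    x = rng.normal(size=(100, 8))  # 100 samples, 8 features (like California housing)
    y = x @ rng.normal(size=8) + rng.normal(scale=0.1, size=100)
    preds = xgboost(x[:75], y[:75], x[75:])
    assert preds.shape == (25, 1)  # predictions come back as a column vector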
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    """Wraps a FLAVA image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Prepares text via the tokenizer and images via the image processor."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwarded to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwarded to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
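
# A hedged usage sketch (checkpoint name and PIL usage are assumptions, not part
# of this file): the processor routes text through the tokenizer and images
# through the image processor, then merges both dicts into one batch.
#
#   from transformers import FlavaProcessor
#   from PIL import Image
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of a cat",
#                      return_tensors="pt", padding=True)
#   # inputs now holds input_ids/attention_mask plus pixel_values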
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
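
# Illustration only (this helper is hypothetical; the module itself just exports
# the constants above): a doc builder could substitute the placeholders with the
# fake classes so that code samples still format cleanly.
def _apply_avoid_patterns(code_sample: str) -> str:
    for placeholder, fake in black_avoid_patterns.items():
        code_sample = code_sample.replace(placeholder, fake)
    return code_sample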
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    """Return True only if every custom extension file exists in the package."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
import collections
import os
import re
from pathlib import Path

PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Read an init file and parse, per backend, the _import_structure objects
    and the TYPE_CHECKING objects it defines."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between the _import_structure objects and the
    TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the repo; raise if both halves of an init disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Check all submodules are properly registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
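
# Small illustration (not in the original utility) of what find_backend extracts
# from the guarded blocks it scans; note _re_backend's empty trailing group is
# why the code takes b[0] from each findall tuple:
#
#   find_backend("    if not is_torch_available():")  -> "torch"
#   find_backend("    if not is_tf_available():")     -> "tf"
#   find_backend("import os")                         -> None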
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : Any = set()
# Replace all the whitespace in our sentence
UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 2_6
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : str = [False] * 2_6
for char in input_str:
if char.islower():
UpperCamelCase : List[Any] = True
elif char.isupper():
UpperCamelCase : List[Any] = True
return all(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def a ( ):
"""simple docstring"""
from timeit import timeit
UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
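
# Note on the design: all three functions are O(len(input_str)); the "fastest"
# variant wins the benchmark above because str.lower() and the set comprehension
# run almost entirely in C. A quick check with another well-known pangram:
#
#   is_pangram_fastest("Waltz, bad nymph, for quick jigs vex.")  # -> True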
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
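
# A hedged usage sketch (not part of this module): instantiating the default
# configuration and overriding two of the xpath-specific fields.
#
#   from transformers import MarkupLMConfig, MarkupLMModel
#
#   config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#   model = MarkupLMModel(config)  # randomly initialized weights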
import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert

from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
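
# How this test is usually exercised (the repository path is an assumption, not
# stated in the file itself): pytest collects DeeBertTests, and the @slow marker
# gates it behind RUN_SLOW=1.
#
#   RUN_SLOW=1 python -m pytest examples/research_projects/deebert/test_glue_deebert.py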
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
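
# Illustration (usage here is an assumption about the ONNX export wiring, not
# part of this module): the inputs property tells the exporter which axes of
# each input tensor are dynamic.
#
#   onnx_config = IBertOnnxConfig(IBertConfig(), task="sequence-classification")
#   print(onnx_config.inputs)
#   # OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#   #              ("attention_mask", {0: "batch", 1: "sequence"})])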
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers import (
        TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
        BertConfig,
        DPRConfig,
        TFDPRContextEncoder,
        TFDPRQuestionEncoder,
        TFDPRReader,
    )


class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
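
# A hedged usage sketch of the model family under test (the checkpoint is the
# same one used in the integration test above):
#
#   from transformers import TFDPRQuestionEncoder, DPRQuestionEncoderTokenizer
#
#   tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   embeddings = model(**tokenizer("hello, is my dog cute?", return_tensors="tf")).pooler_output
#   # embeddings.shape == (1, 768)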
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__UpperCAmelCase : int = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = []
UpperCamelCase : int = []
UpperCamelCase : List[Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCamelCase : Tuple = parent.find_all(child.name , recursive=__SCREAMING_SNAKE_CASE )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(__SCREAMING_SNAKE_CASE , 1 ) if s is child ) )
UpperCamelCase : Optional[Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' )
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : List[str] = []
UpperCamelCase : str = []
for element in html_code.descendants:
if type(__SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCamelCase : Any = html.unescape(__SCREAMING_SNAKE_CASE ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : int = self.xpath_soup(__SCREAMING_SNAKE_CASE )
stringaxtag_seq.append(__SCREAMING_SNAKE_CASE )
stringaxsubs_seq.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """simple docstring"""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        """simple docstring"""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str` or `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
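# Usage sketch (my addition, not part of the original module): with `bs4`
# installed, the extractor turns raw HTML into parallel node/xpath lists.
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello <b>world</b></p></body></html>")
#     encoding["nodes"]   # e.g. [["Hello", "world"]]
#     encoding["xpaths"]  # e.g. [["/html/body/p", "/html/body/p/b"]]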
| 315
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1_024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = hidden_size
UpperCamelCase : Optional[int] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
UpperCamelCase : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
UpperCamelCase : Any = BitConfig(**__SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
UpperCamelCase : Any = BitConfig(**__SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = backbone_config
else:
raise ValueError(
f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
UpperCamelCase : Optional[int] = backbone_featmap_shape
UpperCamelCase : int = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
UpperCamelCase : str = None
UpperCamelCase : Any = None
UpperCamelCase : List[str] = []
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : Tuple = num_attention_heads
UpperCamelCase : Tuple = intermediate_size
UpperCamelCase : Any = hidden_act
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : str = attention_probs_dropout_prob
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Any = layer_norm_eps
UpperCamelCase : Any = image_size
UpperCamelCase : Union[str, Any] = patch_size
UpperCamelCase : Optional[Any] = num_channels
UpperCamelCase : Optional[int] = qkv_bias
UpperCamelCase : Optional[Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
UpperCamelCase : Dict = readout_type
UpperCamelCase : Optional[Any] = reassemble_factors
UpperCamelCase : Tuple = neck_hidden_sizes
UpperCamelCase : str = fusion_hidden_size
UpperCamelCase : int = head_in_index
UpperCamelCase : Tuple = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
UpperCamelCase : Optional[int] = use_auxiliary_head
UpperCamelCase : Optional[Any] = auxiliary_loss_weight
UpperCamelCase : List[Any] = semantic_loss_ignore_index
UpperCamelCase : List[Any] = semantic_classifier_dropout
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
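# Usage sketch (my addition): the hybrid variant materialises the BiT backbone
# defaults shown above, which survive a round trip through `to_dict()`.
#
#     config = DPTConfig(is_hybrid=True)
#     d = config.to_dict()
#     d["model_type"]                     # "dpt"
#     d["backbone_config"]["layer_type"]  # "bottleneck"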
| 315
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """simple docstring"""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    """simple docstring"""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
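    # Usage for summarization (my addition; paths and sizes are placeholders):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --bs 16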
run_generate(verbose=True)
| 315
| 1
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    '''simple docstring'''

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        """simple docstring"""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
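# Round-trip sketch (my addition): the builder consumes pickled DataFrames, so
# the smallest possible input is
#
#     import pandas as pd
#     pd.DataFrame({"a": [1, 2]}).to_pickle("train.pkl")
#
# after which `load_dataset("pandas", data_files={"train": "train.pkl"})`
# yields a single-split dataset with one integer column "a".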
| 315
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        """simple docstring"""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """simple docstring"""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
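# token2json sketch (my addition): given a Donut-style tag sequence, the parser
# above yields nested JSON; passing `added_vocab` explicitly avoids needing a
# tokenizer instance.
#
#     processor.token2json("<s_menu><s_name>latte</s_name></s_menu>", added_vocab={})
#     # -> {"menu": {"name": "latte"}}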
| 315
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
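# Run sketch (my addition; the module path below is an assumption about the
# repository layout):
#     python -m pytest -k MobileViTImageProcessingTest tests/models/mobilevit/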
| 315
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
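# Lazy-import sketch (my addition): with this structure nothing heavy is loaded
# at package-import time, e.g.
#
#     from transformers.models.mgp_str import MgpstrProcessor
#
# resolves `processing_mgp_str` only on first attribute access, and the
# modeling classes simply stay unavailable when torch is not installed.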
| 315
| 1
|
def solution(n: int = 1_000) -> int:
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 315
|
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
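# Cross-check sketch (my addition): the same count via a top-down recursion
# (red blocks of length >= 3 separated by at least one black square); for a
# row of length 7 both approaches give 17, matching the classic example.
from functools import lru_cache


@lru_cache(maxsize=None)
def _count_fills(remaining: int) -> int:
    ways = 1  # the all-black row
    for block_start in range(max(remaining, 0)):
        for block_length in range(3, remaining - block_start + 1):
            # a black separator square follows each placed block
            ways += _count_fills(remaining - block_start - block_length - 1)
    return ways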
if __name__ == "__main__":
print(f'''{solution() = }''')
| 315
| 1
|
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """simple docstring"""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """simple docstring"""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """simple docstring"""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """simple docstring"""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    '''simple docstring'''

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """simple docstring"""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """simple docstring"""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
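# Wiring sketch (my addition): the subclass is constructed like a plain
# Trainer plus the two extra hooks, e.g.
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )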
| 315
| 0
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """simple docstring"""
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
UpperCamelCase : str = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
UpperCamelCase : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
UpperCamelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = NezhaModelTester(self )
UpperCamelCase : int = ConfigTester(self , config_class=__A , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__A )
def _lowercase ( self ):
"""simple docstring"""
(
UpperCamelCase
) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase : Any = None
self.model_tester.create_and_check_model_as_decoder(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__A )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Tuple = NezhaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
UpperCamelCase : int = True
UpperCamelCase : Optional[int] = model_class(config=__A )
UpperCamelCase : Any = self._prepare_for_class(__A , __A )
UpperCamelCase : Optional[int] = torch.jit.trace(
__A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , '''bert.pt''' ) )
UpperCamelCase : Union[str, Any] = torch.jit.load(os.path.join(__A , '''bert.pt''' ) , map_location=__A )
loaded(inputs_dict['''input_ids'''].to(__A ) , inputs_dict['''attention_mask'''].to(__A ) )
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 351
|
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the sorted characters of a word; anagrams share a signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word found in the word list."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
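# Example (doctest-style sketch; the anagram list shown is hypothetical since
# the actual output depends on the contents of the adjacent words.txt):
# >>> signature("python")
# 'hnopty'
# >>> anagram("pots")  # e.g. ['opts', 'post', 'pots', 'spot', 'stop', 'tops'] if present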
| 315
| 0
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length using secrets."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password that is guaranteed to contain the given characters."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl: str, i: int) -> None:
    pass  # Put your code here...


def random_letters(chars_incl: str, i: int) -> None:
    pass  # Put your code here...


def random_characters(chars_incl: str, i: int) -> None:
    pass  # Put your code here...


# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Check that a password is at least min_length long and mixes character classes."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you better save it.]")


if __name__ == "__main__":
    main()
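# Example (doctest-style sketch): password_generator output is random, but
# is_strong_password is deterministic:
# >>> is_strong_password("Hwea7$2!")  # has uppercase, lowercase, digit and punctuation
# True
# >>> is_strong_password("abc")  # shorter than the default min_length of 8
# False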
| 352
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of source_data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalize each column to [0, 1]; weight 0 rewards small values, weight 1 large ones."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele

    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row of source_data by weighted column proximity and append the score in place."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
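# Worked example (values computed by hand from the definitions above; weight 0
# rewards small values, weight 1 rewards large ones, and the final score is
# appended to each row in place):
# >>> vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
# >>> procentual_proximity(vehicles, [0, 0, 1])
# [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]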
| 315
| 0
|
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of k consecutive elements, using a sliding window in O(n)."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
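# Doctest-style example of the sliding window above: the windows of size 2 over
# [1, 2, 3, 4, 5] sum to 3, 5, 7 and 9, so the maximum is 9.
# >>> max_sum_in_array([1, 2, 3, 4, 5], 2)
# 9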
| 353
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Flip every image/annotation pair in IMG_DIR/LABEL_DIR and write the results to OUTPUT_DIR."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO-style .txt label files and pair each with its .jpg image path."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror its bounding-box centers accordingly."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random lowercase/digit string to make output file names unique."""
    assert number_char > 1, "The number of characters must be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 315
| 0
|
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: return x for x > 0 and alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
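# Example (computed from the formula above, alpha * (e**x - 1) for x <= 0):
# >>> exponential_linear_unit(np.array([2.3, 0.6, -2, -3.8]), 0.3)
# array([ 2.3       ,  0.6       , -0.25939942, -0.29328877])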
| 354
|
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and run a quantum half adder: sum (XOR) and carry (AND) of two bits."""
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
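# Expected histogram (sketch): for inputs (1, 1) the XOR qubit measures 0 and
# the AND (carry) qubit measures 1, so every shot lands in bitstring "10"
# (classical bit 1 is printed first):
# >>> half_adder(1, 1)
# {'10': 1000}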
| 315
| 0
|
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 355
|
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
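# Note: despite its name, cosine_distance above returns the matrix of cosine
# *similarities* between L2-normalized image and concept embeddings, shaped
# (n_images, n_concepts); a per-concept threshold is then subtracted from it.
# Minimal sketch (toy tensors, not real CLIP embeddings):
# >>> cosine_distance(torch.eye(2), torch.eye(2))
# tensor([[1., 0.],
#         [0., 1.]])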
| 315
| 0
|
import importlib
import os
import sys


# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes (those with a non-empty `all_model_classes`) in a model test file."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of some test class in the file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by a test class, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in a test file that test a given model class."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in a test file that are associated with a given model class."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in a file to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class appearing in a test file to its test classes."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class appearing in a test file to its model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively convert classes in nested lists/tuples/dicts to their names for JSON output."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
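# Example usage (sketch; the test file path is hypothetical and must exist in a
# transformers repo checkout, since the helpers import it as a module):
# >>> mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
# >>> to_json(mapping)  # class objects rendered as their names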
| 356
|
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 315
| 0
|
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Solve a rat-in-a-maze problem via backtracking and print the path if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursively explore the maze from (i, j), marking the path in solutions."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
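# Example (hand-traced against run_maze above): 0 marks a free cell and 1 a
# blocked one; the printed solution matrix marks the found path with 1s.
# >>> solve_maze([
# ...     [0, 1, 0, 1, 1],
# ...     [0, 0, 0, 0, 0],
# ...     [1, 0, 1, 0, 1],
# ...     [0, 0, 1, 0, 0],
# ...     [1, 0, 0, 1, 0],
# ... ])
# prints the marked path matrix and returns True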
| 357
|
def upper(word: str) -> str:
    """Convert a string to ASCII uppercase by shifting each lowercase letter 32 code points down."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
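# Doctest-style examples (the ASCII shift above leaves non-lowercase
# characters untouched):
# >>> upper("wow")
# 'WOW'
# >>> upper("Hello World 123")
# 'HELLO WORLD 123'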
| 315
| 0
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase : Optional[Any] = 'pt'
elif is_tf_available():
__UpperCAmelCase : Optional[int] = 'tf'
else:
__UpperCAmelCase : List[Any] = 'jax'
class UpperCAmelCase_ ( lowercase__, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Any = ByTaTokenizer
__UpperCamelCase : Optional[int] = False
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
UpperCamelCase : Tuple = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=20 , __SCREAMING_SNAKE_CASE=5 ):
"""simple docstring"""
UpperCamelCase : List[Any] = []
for i in range(len(_UpperCamelCase ) ):
try:
UpperCamelCase : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
UpperCamelCase : Optional[int] = list(filter(lambda __SCREAMING_SNAKE_CASE : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _UpperCamelCase ) )
UpperCamelCase : int = list(filter(lambda __SCREAMING_SNAKE_CASE : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCamelCase ) , _UpperCamelCase ) )
if max_length is not None and len(_UpperCamelCase ) > max_length:
UpperCamelCase : Dict = toks[:max_length]
if min_length is not None and len(_UpperCamelCase ) < min_length and len(_UpperCamelCase ) > 0:
while len(_UpperCamelCase ) < min_length:
UpperCamelCase : Any = toks + toks
# toks_str = [t[1] for t in toks]
UpperCamelCase : Tuple = [t[0] for t in toks]
# Ensure consistency
UpperCamelCase : Optional[Any] = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
if " " not in output_txt and len(_UpperCamelCase ) > 1:
UpperCamelCase : Dict = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCamelCase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCamelCase )
)
if with_prefix_space:
UpperCamelCase : str = """ """ + output_txt
UpperCamelCase : str = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
return output_txt, output_ids
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.ta_base_tokenizer
UpperCamelCase : Dict = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
UpperCamelCase : Union[str, Any] = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = self.ta_base_tokenizer
UpperCamelCase : List[Any] = """Unicode €."""
UpperCamelCase : Any = tokenizer(_UpperCamelCase )
UpperCamelCase : int = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , _UpperCamelCase )
# decoding
UpperCamelCase : Union[str, Any] = tokenizer.decode(_UpperCamelCase )
self.assertEqual(_UpperCamelCase , '''Unicode €.</s>''' )
UpperCamelCase : Optional[int] = tokenizer('''e è é ê ë''' )
UpperCamelCase : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , _UpperCamelCase )
# decoding
UpperCamelCase : Optional[int] = tokenizer.decode(_UpperCamelCase )
self.assertEqual(_UpperCamelCase , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.ta_base_tokenizer
UpperCamelCase : int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
UpperCamelCase : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
UpperCamelCase : Union[str, Any] = tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
if FRAMEWORK != "jax":
UpperCamelCase : List[str] = list(batch.input_ids.numpy()[0] )
else:
UpperCamelCase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.ta_base_tokenizer
UpperCamelCase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase : Dict = tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _UpperCamelCase )
self.assertIn('''attention_mask''' , _UpperCamelCase )
self.assertNotIn('''decoder_input_ids''' , _UpperCamelCase )
self.assertNotIn('''decoder_attention_mask''' , _UpperCamelCase )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.ta_base_tokenizer
UpperCamelCase : str = [
"""Summary of the text.""",
"""Another summary.""",
]
UpperCamelCase : int = tokenizer(
text_target=_UpperCamelCase , max_length=32 , padding='''max_length''' , truncation=_UpperCamelCase , return_tensors=_UpperCamelCase )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.ta_base_tokenizer
UpperCamelCase : int = ["""A long paragraph for summarization. </s>"""]
UpperCamelCase : Any = ["""Summary of the text. </s>"""]
# fmt: off
UpperCamelCase : List[str] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
UpperCamelCase : List[str] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
UpperCamelCase : Dict = tokenizer(_UpperCamelCase , text_target=_UpperCamelCase )
self.assertEqual(_UpperCamelCase , batch['''input_ids'''][0] )
self.assertEqual(_UpperCamelCase , batch['''labels'''][0] )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCamelCase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase : str = tempfile.mkdtemp()
UpperCamelCase : int = """ He is very happy, UNwant\u00E9d,running"""
UpperCamelCase : str = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
UpperCamelCase : List[str] = tokenizer.__class__.from_pretrained(_UpperCamelCase )
UpperCamelCase : Union[str, Any] = after_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
shutil.rmtree(_UpperCamelCase )
UpperCamelCase : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase : Optional[int] = tempfile.mkdtemp()
UpperCamelCase : Dict = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(['''bim''', '''bambam'''] )
UpperCamelCase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
UpperCamelCase : str = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
UpperCamelCase : Optional[int] = tokenizer.__class__.from_pretrained(_UpperCamelCase )
UpperCamelCase : Optional[Any] = after_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCamelCase : List[str] = tokenizer.__class__.from_pretrained(_UpperCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCamelCase )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
UpperCamelCase : str = json.load(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
UpperCamelCase : Any = json.load(_UpperCamelCase )
UpperCamelCase : Any = [f"""<extra_id_{i}>""" for i in range(125 )]
UpperCamelCase : Optional[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
UpperCamelCase : List[str] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(_UpperCamelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_UpperCamelCase , _UpperCamelCase )
with open(os.path.join(_UpperCamelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_UpperCamelCase , _UpperCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCamelCase : str = tokenizer_class.from_pretrained(
_UpperCamelCase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCamelCase : List[Any] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_UpperCamelCase )]
UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained(
_UpperCamelCase , additional_special_tokens=_UpperCamelCase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCamelCase )
UpperCamelCase : Optional[Any] = tokenizer_class.from_pretrained(_UpperCamelCase )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.get_tokenizers(fast=_UpperCamelCase , do_lower_case=_UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCamelCase : Tuple = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
UpperCamelCase : Optional[int] = tokenizer.convert_tokens_to_string(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCamelCase : List[Any] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
UpperCamelCase : Tuple = 0
UpperCamelCase : int = tokenizer.convert_ids_to_tokens(
_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
for attr in attributes_list:
setattr(_UpperCamelCase , attr + '''_id''' , _UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase , attr + '''_id''' ) , _UpperCamelCase )
setattr(_UpperCamelCase , attr + '''_id''' , _UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase , attr + '''_id''' ) , _UpperCamelCase )
setattr(_UpperCamelCase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(_UpperCamelCase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(_UpperCamelCase , '''additional_special_tokens_ids''' ) , [] )
setattr(_UpperCamelCase , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCamelCase , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCamelCase , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
| 358
|
import math


def is_prime(number: int) -> bool:
    """Check primality by trial division of odd numbers up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime at or above factor * value (strictly above if already prime)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
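# Examples (hand-checked against the definitions above): when the scaled input
# is itself prime, next_prime recurses from the next integer, so the result is
# strictly greater.
# >>> is_prime(29)
# True
# >>> next_prime(5)
# 7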
| 315
| 0
|
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val or max_val depending on option, after validating the inputs."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the midpoint of the two numbers, truncated to an int."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Find to_guess by repeatedly bisecting [lower, higher], printing the midpoints tried."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower must be less than higher")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher

    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Read the range and the target from stdin and start guessing."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
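# Example run (midpoints computed by hand): guess_the_number(10, 1000, 17)
# halves the interval via get_avg until it hits the target, printing
# guess the number : 17
# details : [505, 257, 133, 71, 40, 25, 17]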
| 359
|
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 315
| 0
|
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items by decreasing value/weight ratio, splitting the last one."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
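# Worked example (classic instance, computed from the greedy rule above):
# >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
# (240.0, [1, 1, 0.6666666666666666])
# Items are taken in decreasing value/weight ratio; only the last item is
# split, with 20/30 of it fitting into the remaining capacity.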
| 360
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
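        expected_words = UpperCamelCase  # the Tesseract words list defined just above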
UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        expected_boxes = UpperCamelCase  # the Tesseract boxes list defined just above
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """Split a dataset mapping into its features and target.

    >>> data_handling({"data": "[5.0, 3.5, 1.3, 0.3]", "target": [0]})
    ('[5.0, 3.5, 1.3, 0.3]', [0])
    """
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Train an XGBoost regressor on (features, target) and predict for test_features."""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
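# A minimal standalone sketch of the helper above (illustrative only; the tiny
# synthetic arrays are stand-ins, not part of the original script):
#
#   features = np.arange(20, dtype=float).reshape(10, 2)
#   target = np.arange(10, dtype=float)
#   print(xgboost(features, target, features[:3]).shape)  # (3, 1)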
def main() -> None:
    """Fetch the California housing dataset, fit the regressor and report errors."""
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze-and-Excitation attention: pool spatially, then gate the channels.
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed of three convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
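# Minimal usage sketch (illustrative, not part of the original module): build a
# randomly initialized model from a default config and run one forward pass.
#
#   config = RegNetConfig()
#   model = TFRegNetModel(config)
#   pixel_values = tf.random.uniform((1, config.num_channels, 224, 224))
#   outputs = model(pixel_values)
#   print(outputs.last_hidden_state.shape)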
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
from sklearn.metrics import recall_score
import datasets
__UpperCAmelCase : Optional[Any] = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
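_DESCRIPTION = __UpperCAmelCase  # bind the metric description above to the name used below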
__UpperCAmelCase : Optional[int] = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
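_KWARGS_DESCRIPTION = __UpperCAmelCase  # bind the args/returns docstring above to the name used below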
__UpperCAmelCase : Dict = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
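_CITATION = __UpperCAmelCase  # bind the citation string above to the name used below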
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
def find_duplicates(SCREAMING_SNAKE_CASE_ : Any ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCamelCase : Dict = []
for key in import_dict_objects.keys():
UpperCamelCase : Union[str, Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
UpperCamelCase : Dict = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCamelCase : List[str] = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
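# Illustrative sketch (not from the original script): `find_duplicates` above is
# a thin wrapper around collections.Counter, so a toy run looks like this
# (the names are made up for the example):
#
#   import collections
#   names = ["BertModel", "BertConfig", "BertModel"]
#   [k for k, v in collections.Counter(names).items() if v > 1]  # -> ["BertModel"]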
def a ( ):
"""simple docstring"""
UpperCamelCase : Any = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ):
if "__init__.py" in files:
UpperCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' )
UpperCamelCase : Optional[int] = parse_init(SCREAMING_SNAKE_CASE_ )
if objects is not None:
UpperCamelCase : str = analyze_results(*SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def a ( ):
"""simple docstring"""
UpperCamelCase : Dict = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(SCREAMING_SNAKE_CASE_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob('''*.py''' ) ) ) == 0:
continue
UpperCamelCase : List[str] = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : str = short_path.replace(os.path.sep , '''.''' )
submodules.append(SCREAMING_SNAKE_CASE_ )
for fname in files:
if fname == "__init__.py":
continue
UpperCamelCase : Tuple = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : int = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE_ )
return submodules
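# Illustrative sketch (added for clarity, not from the original file): the walk
# above turns file paths into dotted module names, e.g. on a POSIX system:
#
#   short_path = "models/bert/modeling_bert.py"
#   short_path.replace(".py", "").replace(os.path.sep, ".")  # -> "models.bert.modeling_bert"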
__UpperCAmelCase : Optional[int] = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def a ( ):
"""simple docstring"""
from transformers.utils import direct_transformers_import
UpperCamelCase : Tuple = direct_transformers_import(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) , '''r''' ) as f:
UpperCamelCase : List[Any] = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , SCREAMING_SNAKE_CASE_ ) ) )
UpperCamelCase : Union[str, Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase : str = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 315
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCAmelCase_ ( a_):
'''simple docstring'''
__UpperCamelCase : str = '''canine'''
def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=16_384 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0xe0_00 , __SCREAMING_SNAKE_CASE=0xe0_01 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=16_384 , __SCREAMING_SNAKE_CASE=128 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
        super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = max_position_embeddings
UpperCamelCase : List[str] = hidden_size
UpperCamelCase : Any = num_hidden_layers
UpperCamelCase : str = num_attention_heads
UpperCamelCase : Optional[int] = intermediate_size
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : Optional[int] = hidden_dropout_prob
UpperCamelCase : Optional[int] = attention_probs_dropout_prob
UpperCamelCase : Tuple = initializer_range
UpperCamelCase : int = type_vocab_size
UpperCamelCase : List[str] = layer_norm_eps
# Character config:
UpperCamelCase : List[Any] = downsampling_rate
UpperCamelCase : Optional[Any] = upsampling_kernel_size
UpperCamelCase : List[Any] = num_hash_functions
UpperCamelCase : Tuple = num_hash_buckets
UpperCamelCase : Union[str, Any] = local_transformer_stride
| 364
|
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : Any = set()
# Replace all the whitespace in our sentence
UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 2_6
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : str = [False] * 2_6
for char in input_str:
if char.islower():
UpperCamelCase : List[Any] = True
elif char.isupper():
UpperCamelCase : List[Any] = True
return all(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
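# Usage sketch (added for clarity; the names below are the originals referenced
# in the benchmark setup string): all three variants agree, e.g.
#
#   is_pangram_fastest("The quick brown fox jumps over the lazy dog")  # -> True
#   is_pangram_fastest("hello world")                                  # -> False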
def a ( ):
"""simple docstring"""
from timeit import timeit
UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 315
| 0
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def a ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
return getitem, k
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
return setitem, k, v
def a ( SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
return delitem, k
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , *SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
try:
return fun(__a , *__a ), None
except Exception as e:
return None, e
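# Illustrative sketch (not from the original tests): the helper above turns
# both outcomes into comparable values instead of raising, e.g.
#
#   _run_operation({}, setitem, "k", "v")   # -> (None, None)   (setitem returns None)
#   _run_operation({}, getitem, "missing")  # -> (None, KeyError('missing'))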
__UpperCAmelCase : Optional[Any] = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__UpperCAmelCase : int = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__UpperCAmelCase : int = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__UpperCAmelCase : int = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__UpperCAmelCase : List[str] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__UpperCAmelCase : Any = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : str = HashMap(initial_block_size=4 )
UpperCamelCase : Union[str, Any] = {}
for _, (fun, *args) in enumerate(__a ):
UpperCamelCase : List[Any] = _run_operation(__a , __a , *__a )
UpperCamelCase : List[Any] = _run_operation(__a , __a , *__a )
assert my_res == py_res
assert str(__a ) == str(__a )
assert set(__a ) == set(__a )
assert len(__a ) == len(__a )
assert set(my.items() ) == set(py.items() )
def a ( ):
"""simple docstring"""
def is_public(SCREAMING_SNAKE_CASE_ : str ) -> bool:
return not name.startswith('''_''' )
UpperCamelCase : List[Any] = {name for name in dir({} ) if is_public(__a )}
UpperCamelCase : Tuple = {name for name in dir(HashMap() ) if is_public(__a )}
assert dict_public_names > hash_public_names
| 365
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase : Union[str, Any] = logging.getLogger()
def a ( ):
"""simple docstring"""
UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase : List[str] = parser.parse_args()
return args.f
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = logging.StreamHandler(sys.stdout )
logger.addHandler(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Dict = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , '''run_glue_deebert.py''' )
with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : int = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(__SCREAMING_SNAKE_CASE , 0.666 )
@slow
@require_torch_non_multi_gpu
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
self.run_and_check(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__SCREAMING_SNAKE_CASE )
| 315
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase : Optional[Any] = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : str = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
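# Note (added for clarity, not in the original init): with the _LazyModule
# pattern, the torch-backed classes listed in _import_structure are only
# imported on first attribute access -- e.g. `from ... import MegatronBertModel`
# triggers the real import, while importing the package alone stays cheap.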
| 366
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[Any] = "ibert"
def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="none" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = vocab_size
UpperCamelCase : Optional[int] = hidden_size
UpperCamelCase : Tuple = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : Dict = hidden_act
UpperCamelCase : Union[str, Any] = intermediate_size
UpperCamelCase : str = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Dict = max_position_embeddings
UpperCamelCase : Union[str, Any] = type_vocab_size
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : Union[str, Any] = layer_norm_eps
UpperCamelCase : Dict = position_embedding_type
UpperCamelCase : int = quant_mode
UpperCamelCase : Any = force_dequant
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 315
| 0
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__UpperCAmelCase : Optional[Any] = logging.getLogger(__name__)
__UpperCAmelCase : List[Any] = "Hello world! cécé herlolip"
__UpperCAmelCase : str = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = BertAbsConfig(
temp_dir='''.''' , finetune_bert=a_ , large=a_ , share_emb=a_ , use_bert_emb=a_ , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
    UpperCamelCase : Dict = torch.load(a_ , lambda storage , loc : storage )
UpperCamelCase : Tuple = AbsSummarizer(a_ , torch.device('''cpu''' ) , a_ )
original.eval()
UpperCamelCase : Dict = BertAbsSummarizer(a_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
UpperCamelCase : Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
UpperCamelCase : List[Any] = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(a_ )) )
UpperCamelCase : Union[str, Any] = torch.tensor(a_ ).unsqueeze(0 )
UpperCamelCase : List[str] = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(a_ )) )
UpperCamelCase : Any = torch.tensor(a_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCamelCase : Union[str, Any] = encoder_input_ids
UpperCamelCase : Optional[int] = decoder_input_ids
UpperCamelCase : List[str] = None
UpperCamelCase : List[Any] = None
UpperCamelCase : Dict = None
UpperCamelCase : List[str] = None
UpperCamelCase : Optional[int] = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCamelCase : Tuple = original(a_ , a_ , a_ , a_ , a_ , a_ , a_ )[0]
UpperCamelCase : str = original.generator(a_ )
UpperCamelCase : int = new_model(
a_ , a_ , a_ , a_ , a_ )[0]
UpperCamelCase : Union[str, Any] = new_model.generator(a_ )
UpperCamelCase : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(a_ ) )
UpperCamelCase : List[Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(a_ ) )
UpperCamelCase : Dict = torch.allclose(a_ , a_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 367
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__UpperCAmelCase : int = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = []
UpperCamelCase : int = []
UpperCamelCase : List[Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCamelCase : Tuple = parent.find_all(child.name , recursive=__SCREAMING_SNAKE_CASE )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(__SCREAMING_SNAKE_CASE , 1 ) if s is child ) )
UpperCamelCase : Optional[Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' )
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : List[str] = []
UpperCamelCase : str = []
for element in html_code.descendants:
if type(__SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCamelCase : Any = html.unescape(__SCREAMING_SNAKE_CASE ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : int = self.xpath_soup(__SCREAMING_SNAKE_CASE )
stringaxtag_seq.append(__SCREAMING_SNAKE_CASE )
stringaxsubs_seq.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ''''''
for tagname, subs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
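    # Worked example (added for clarity): for tag names ["html", "body", "div"]
    # with subscripts [0, 0, 2], the loop above yields "/html/body/div[2]";
    # a subscript of 0 means the tag is the only sibling of its kind, so the
    # bracketed index is omitted.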
def __call__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = False
# Check that strings has a valid type
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = True
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
if len(__SCREAMING_SNAKE_CASE ) == 0 or isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = True
if not valid_strings:
raise ValueError(
                '''HTML strings must be of type `str`, `List[str]` (batch of examples), '''
f"""but is of type {type(__SCREAMING_SNAKE_CASE )}.""" )
UpperCamelCase : int = bool(isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE )) )
if not is_batched:
UpperCamelCase : Union[str, Any] = [html_strings]
# Get nodes + xpaths
UpperCamelCase : str = []
UpperCamelCase : int = []
for html_string in html_strings:
UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = self.get_three_from_single(__SCREAMING_SNAKE_CASE )
nodes.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = []
for node, tag_list, sub_list in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : str = self.construct_xpath(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
xpath_strings.append(__SCREAMING_SNAKE_CASE )
xpaths.append(__SCREAMING_SNAKE_CASE )
# return as Dict
UpperCamelCase : List[str] = {'''nodes''': nodes, '''xpaths''': xpaths}
UpperCamelCase : List[Any] = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
return encoded_inputs
| 315
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class UpperCAmelCase_ ( _UpperCamelCase):
'''simple docstring'''
__UpperCamelCase : Union[List[PIL.Image.Image], np.ndarray]
__UpperCamelCase : Optional[List[bool]]
__UpperCamelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 368
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
__UpperCAmelCase : List[str] = getLogger(__name__)
__UpperCAmelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu"
def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : str = DEFAULT_DEVICE , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : int="summarization" , SCREAMING_SNAKE_CASE_ : int=None , **SCREAMING_SNAKE_CASE_ : Any , ):
"""simple docstring"""
UpperCamelCase : Dict = Path(SCREAMING_SNAKE_CASE_ ).open('''w''' , encoding='''utf-8''' )
UpperCamelCase : List[Any] = str(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
if fpaa:
UpperCamelCase : List[Any] = model.half()
UpperCamelCase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
UpperCamelCase : int = time.time()
# update config with task specific params
use_task_specific_params(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if prefix is None:
UpperCamelCase : Union[str, Any] = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ):
UpperCamelCase : Optional[int] = [prefix + text for text in examples_chunk]
UpperCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , truncation=SCREAMING_SNAKE_CASE_ , padding='''longest''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
UpperCamelCase : str = int(time.time() - start_time ) # seconds
UpperCamelCase : Any = len(SCREAMING_SNAKE_CASE_ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def a ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any]=True ):
"""simple docstring"""
UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=SCREAMING_SNAKE_CASE_ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=SCREAMING_SNAKE_CASE_ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
        '''--prefix''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' , type=SCREAMING_SNAKE_CASE_ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=SCREAMING_SNAKE_CASE_ , default=8 , required=SCREAMING_SNAKE_CASE_ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=SCREAMING_SNAKE_CASE_ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
UpperCamelCase , UpperCamelCase : int = parser.parse_known_args()
UpperCamelCase : str = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE_ )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
UpperCamelCase : str = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
UpperCamelCase : Tuple = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
UpperCamelCase : str = generate_summaries_or_translations(
SCREAMING_SNAKE_CASE_ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **SCREAMING_SNAKE_CASE_ , )
if args.reference_path is None:
return {}
# Compute scores
UpperCamelCase : Tuple = calculate_bleu if '''translation''' in args.task else calculate_rouge
UpperCamelCase : Dict = [x.rstrip() for x in open(args.save_path ).readlines()]
UpperCamelCase : Optional[int] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(SCREAMING_SNAKE_CASE_ )]
UpperCamelCase : dict = score_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
scores.update(SCREAMING_SNAKE_CASE_ )
if args.dump_args:
scores.update(SCREAMING_SNAKE_CASE_ )
if args.info:
UpperCamelCase : Optional[Any] = args.info
if verbose:
print(SCREAMING_SNAKE_CASE_ )
if args.score_path is not None:
json.dump(SCREAMING_SNAKE_CASE_ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 315
| 0
|
def a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : Dict = 0 ):
"""simple docstring"""
UpperCamelCase : int = right or len(a__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a__ , a__ , left + 1 , right - 1 )
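# Behavior sketch (added for clarity): the recursion above probes both ends of
# the list and walks inward, so searching 4 in [1, 2, 4, 8] checks indices 0
# and 3, then 1 and 2, and returns index 2; a missing key exhausts the window
# and returns -1. Note this is linear-time two-ended search, not binary search.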
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : int = ["image_processor", "tokenizer"]
__UpperCamelCase : List[str] = "AutoImageProcessor"
__UpperCamelCase : Optional[Any] = "AutoTokenizer"
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
UpperCamelCase : Any = kwargs.pop('''feature_extractor''' )
UpperCamelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = self.image_processor
UpperCamelCase : int = False
def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = kwargs.pop('''images''' , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = kwargs.pop('''text''' , __SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase : Union[str, Any] = args[0]
UpperCamelCase : str = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
UpperCamelCase : List[str] = self.image_processor(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is not None:
UpperCamelCase : Optional[Any] = self.tokenizer(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCamelCase : List[str] = encodings['''input_ids''']
return inputs
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@contextmanager
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
UpperCamelCase : Any = True
UpperCamelCase : int = self.tokenizer
yield
UpperCamelCase : List[Any] = self.image_processor
UpperCamelCase : Tuple = False
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if added_vocab is None:
UpperCamelCase : str = self.tokenizer.get_added_vocab()
UpperCamelCase : int = {}
while tokens:
UpperCamelCase : Dict = re.search(R'''<s_(.*?)>''' , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
UpperCamelCase : List[str] = start_token.group(1 )
UpperCamelCase : Dict = re.search(Rf"""</s_{key}>""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
UpperCamelCase : Any = start_token.group()
if end_token is None:
UpperCamelCase : Optional[int] = tokens.replace(__SCREAMING_SNAKE_CASE , '''''' )
else:
UpperCamelCase : Dict = end_token.group()
UpperCamelCase : int = re.escape(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = re.escape(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
UpperCamelCase : Dict = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
UpperCamelCase : Tuple = self.tokenajson(__SCREAMING_SNAKE_CASE , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE )
if value:
if len(__SCREAMING_SNAKE_CASE ) == 1:
UpperCamelCase : str = value[0]
UpperCamelCase : str = value
else: # leaf nodes
UpperCamelCase : Optional[int] = []
for leaf in content.split(R'''<sep/>''' ):
UpperCamelCase : Optional[int] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
UpperCamelCase : int = leaf[1:-2] # for categorical special tokens
output[key].append(__SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
UpperCamelCase : Tuple = output[key][0]
UpperCamelCase : List[Any] = tokens[tokens.find(__SCREAMING_SNAKE_CASE ) + len(__SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
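    # Worked example (added for clarity; the token string is made up): the
    # parser above turns <s_key>...</s_key> tag tokens into nested dicts, roughly
    #
    #   "<s_menu><s_name>Latte</s_name></s_menu>"  ->  {"menu": {"name": "Latte"}}
    #
    # with leaf values split on <sep/> into a list when there is more than one.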
@property
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
| 315
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class UpperCAmelCase_ ( __UpperCamelCase, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[int] = CpmAntTokenizer
__UpperCamelCase : List[Any] = False
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
UpperCamelCase : Optional[Any] = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
UpperCamelCase : Any = """今天天气真好!"""
UpperCamelCase : Dict = ["""今天""", """天气""", """真""", """好""", """!"""]
UpperCamelCase : int = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase : Dict = """今天天气真好!"""
UpperCamelCase : List[Any] = [tokenizer.bos_token] + tokens
UpperCamelCase : List[str] = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
UpperCamelCase : Any = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
| 370
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase : Union[str, Any] = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Union[str, Any] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase)
class UpperCAmelCase_ ( _UpperCAmelCase):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def _lowercase ( self , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase : List[Any] = {}
if top_k is not None:
UpperCamelCase : str = top_k
return {}, {}, postprocess_params
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return super().__call__(lowercase_ , **lowercase_ )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = load_image(lowercase_ )
UpperCamelCase : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework )
return model_inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.model(**lowercase_ )
return model_outputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCamelCase : List[str] = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase : Any = model_outputs.logits.softmax(-1 )[0]
            UpperCamelCase , UpperCamelCase : Dict = probs.topk(lowercase_ )
elif self.framework == "tf":
UpperCamelCase : Optional[Any] = stable_softmax(model_outputs.logits , axis=-1 )[0]
UpperCamelCase : List[str] = tf.math.top_k(lowercase_ , k=lowercase_ )
            UpperCamelCase , UpperCamelCase : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCamelCase : Union[str, Any] = scores.tolist()
UpperCamelCase : List[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
| 371
|
def a ( SCREAMING_SNAKE_CASE_ : int = 5_0 ):
"""simple docstring"""
UpperCamelCase : List[str] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
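# Worked check (added for clarity): this matches the Project Euler 114
# recurrence -- blocks of length >= 3 separated by at least one empty square --
# and for a row of length 7 the problem statement lists exactly 17 arrangements,
# in agreement with solution(7) == 17.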
if __name__ == "__main__":
print(f'''{solution() = }''')
| 315
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = LDMTextToImagePipeline
__UpperCamelCase : Union[str, Any] = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
__UpperCamelCase : Optional[int] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
__UpperCamelCase : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : List[str] = False
def _lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCamelCase : List[str] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase : Any = CLIPTextModel(_a )
UpperCamelCase : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vqvae''': vae,
'''bert''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if str(_a ).startswith('''mps''' ):
UpperCamelCase : Optional[int] = torch.manual_seed(_a )
else:
UpperCamelCase : List[str] = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Union[str, Any] = self.get_dummy_components()
UpperCamelCase : Any = LDMTextToImagePipeline(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
UpperCamelCase : Optional[Any] = self.get_dummy_inputs(_a )
UpperCamelCase : Any = pipe(**_a ).images
UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
UpperCamelCase : List[str] = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=torch.floataa , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
UpperCamelCase : Dict = torch.manual_seed(_a )
UpperCamelCase : Union[str, Any] = np.random.RandomState(_a ).standard_normal((1, 4, 32, 32) )
UpperCamelCase : Any = torch.from_numpy(_a ).to(device=_a , dtype=_a )
UpperCamelCase : Any = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(_a )
pipe.set_progress_bar_config(disable=_a )
UpperCamelCase : str = self.get_inputs(_a )
UpperCamelCase : Any = pipe(**_a ).images
UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
UpperCamelCase : Union[str, Any] = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878] )
UpperCamelCase : List[str] = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=torch.floataa , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
UpperCamelCase : str = torch.manual_seed(_a )
UpperCamelCase : Optional[Any] = np.random.RandomState(_a ).standard_normal((1, 4, 32, 32) )
UpperCamelCase : List[Any] = torch.from_numpy(_a ).to(device=_a , dtype=_a )
UpperCamelCase : str = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(_a )
pipe.set_progress_bar_config(disable=_a )
UpperCamelCase : int = self.get_inputs(_a )
UpperCamelCase : List[str] = pipe(**_a ).images[0]
UpperCamelCase : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
UpperCamelCase : Optional[Any] = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 350
|
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
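
# Illustrative wiring of this trainer, modeled on the question-answering example
# scripts; every name below (model, training_args, post_processing_function, ...)
# is assumed to be defined by the surrounding training script:
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         tokenizer=tokenizer,
#         data_collator=data_collator,
#         post_process_function=post_processing_function,  # turns raw logits into answer texts
#         compute_metrics=compute_metrics,                 # e.g. SQuAD exact-match / F1
#     )
#     metrics = trainer.evaluate()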
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Count the starting numbers below ``number_limit`` whose chain has exactly ``chain_length`` elements."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
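
# A quick illustration of the chain behaviour (not part of the original module):
# 169 maps back to itself through a three-element loop.
def _demo_digit_factorial_chain() -> None:
    assert digit_factorial_sum(169) == 363601  # 1! + 6! + 9!
    assert digit_factorial_sum(363601) == 1454
    assert digit_factorial_sum(1454) == 169    # the chain 169 -> 363601 -> 1454 loops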
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the word's letters sorted, which is identical for all anagrams."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word from the word list that is an anagram of ``my_word``."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
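
# Illustrative lookup (the exact result depends on the contents of words.txt):
# >>> anagram("pots")
# ['opts', 'post', 'pots', 'spot', 'stop', 'tops']   # typical word lists group these together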
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of k-element subsets of an n-element set."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
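
# A small sanity check (illustrative, not part of the original module):
def _demo_combinations() -> None:
    assert combinations(5, 2) == 10           # the ten 2-element subsets of a 5-element set
    assert combinations(52, 5) == 2_598_960   # the well-known count of five-card poker hands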
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of ``source_data`` into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score every column: weight 0 rewards small values, weight 1 rewards large ones."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Add up the per-column scores for every row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Append a combined, weight-aware score to every row of ``source_data`` and return it."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
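
# Worked example (illustrative): three vehicles scored on price, mileage and year.
def _demo_procentual_proximity() -> None:
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    # weight 0: lower is better (price, mileage); weight 1: higher is better (year)
    scored = procentual_proximity(vehicles, [0, 0, 1])
    assert scored[0][-1] == 2.0  # the first row gets the best combined score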
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Parameters: fill these in before running (the original leaves them empty).
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Flip every dataset image, rewrite its YOLO annotations, and save both."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and their YOLO-format bounding boxes from the given directories."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip every image horizontally or vertically and mirror its bounding boxes."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Also evaluate on the training set whenever evaluation is triggered."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
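
# Typical invocation (the script name is illustrative):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5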
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder for two classical input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
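
# For inputs (1, 1) the circuit is deterministic, so all 1000 shots land on one outcome:
# the classical register reads "10" (bit 1 = AND = carry = 1, bit 0 = XOR = sum = 0),
# i.e. counts == {"10": 1000}, which encodes 1 + 1 = 0 carry 1.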
def mf_knapsack(i, wt, val, j):
    """Memoized 0-1 knapsack: best value using the first i items with capacity j."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the integer-weight knapsack problem and also return one optimal subset."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
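
# In the Stable Diffusion pipelines this module is invoked roughly as follows
# (the feature_extractor / image names are illustrative):
#
#     safety_checker_input = feature_extractor(numpy_to_pil(image), return_tensors="pt")
#     image, has_nsfw_concept = safety_checker(
#         images=image, clip_input=safety_checker_input.pixel_values
#     )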
from typing import List, Union

import numpy as np

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline


logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Turn each candidate label into an NLI premise/hypothesis pair for the model."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """NLI-based zero-shot classification pipeline built on a sequence-classification model."""

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
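
# Typical use goes through the high-level factory (the model choice is illustrative):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier(
#         "I am planning a trip to Japan next spring.",
#         candidate_labels=["travel", "cooking", "politics"],
#     )
#     # -> {"sequence": ..., "labels": ["travel", ...], "scores": [...]}  (labels sorted by score)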
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def upper(word: str) -> str:
    """Convert every lowercase ASCII letter in ``word`` to uppercase."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
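
# Quick check of the ASCII offset trick: 'a' (97) - 32 == 'A' (65).
def _demo_upper() -> None:
    assert upper("hello world") == "HELLO WORLD"
    assert upper("Mix3d c0ntent!") == "MIX3D C0NTENT!"  # non-letters pass through unchanged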
"""simple docstring"""
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
def count_of_possible_combinations(SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
UpperCamelCase : Dict = sum(
count_of_possible_combinations_with_dp_array(target - item , SCREAMING_SNAKE_CASE_ )
for item in array )
UpperCamelCase : Union[str, Any] = answer
return answer
UpperCamelCase : str = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
UpperCamelCase : Tuple = [0] * (target + 1)
UpperCamelCase : List[Any] = 1
for i in range(1 , target + 1 ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Optional[int] = 5
__UpperCAmelCase : Any = [1, 2, 5]
print(combination_sum_iv(n, array, target))
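
# All three implementations agree; for array = [1, 2, 5] and target = 5 there are
# nine ordered combinations (1+1+1+1+1, 1+1+1+2 in four orders, 1+2+2 in three orders, 5).
def _demo_combination_sum_iv() -> None:
    assert combination_sum_iv(3, [1, 2, 5], 5) == 9
    assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
    assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9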
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using trial division by odd numbers."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the nearest prime above ``factor * value`` (below it when desc=True)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
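
# Small sanity checks (illustrative):
def _demo_next_prime() -> None:
    assert is_prime(97) and not is_prime(91)  # 91 = 7 * 13
    assert next_prime(14) == 17               # 15 and 16 are composite
    assert next_prime(13) == 17               # an already-prime input is skipped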
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of ZSTD file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
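
# Typical use goes through fsspec's chained-URL syntax rather than these classes
# directly (the URL is illustrative):
#
#     import fsspec
#
#     with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz", "rb") as f:
#         data = f.read()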
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)


failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check the equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Run core process to find problem solution."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    """Return the first member of the first group of `n` consecutive integers
    that each have exactly `n` distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
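
# Quick sanity check for the helpers above (added for illustration; not part
# of the original solution). 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43 and
# 646 = 2 * 17 * 19 form the first run of three consecutive integers with
# three distinct prime factors each.
def _demo() -> None:
    assert unique_prime_factors(644) == {2, 7, 23}
    assert upf_len(645) == 3
    assert run(3)[0] == 644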
if __name__ == "__main__":
print(solution())
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
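
# Small convenience helper added for illustration (not in the original script):
# RMSE derived from sklearn's MSE, since "Mean Square Error" above is printed
# un-rooted. Newer scikit-learn versions also accept squared=False directly.
def rmse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    return float(mean_squared_error(y_true, y_pred) ** 0.5)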
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
def is_pentagonal(n: int) -> bool:
    """Returns True if n is pentagonal, False otherwise."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Returns the minimal difference of two pentagonal numbers P1 and P2 such
    that P1 + P2 and P1 - P2 are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
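
# Illustration only (not in the original solution): the first pentagonal
# numbers are 1, 5, 12, 22, 35, and `is_pentagonal` accepts exactly those.
def _demo_is_pentagonal() -> None:
    assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35))
    assert not is_pentagonal(4)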
if __name__ == "__main__":
print(f'''{solution() = }''')
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a Flax checkpoint file into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
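
# Worked illustration of the (3, 2, 0, 1) transpose above (comment only, not
# part of the loader): Flax stores 4D conv kernels as (H, W, in_ch, out_ch)
# while PyTorch expects (out_ch, in_ch, H, W).
#
#   k = np.zeros((3, 3, 16, 32))           # Flax layout
#   np.transpose(k, (3, 2, 0, 1)).shape    # -> (32, 16, 3, 3), PyTorch layout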
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Get the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
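
# Usage sketch (illustrative, not part of this init): because of the guarded
# import above, the real pipelines are only exposed when torch and
# transformers>=4.25.0 are installed; otherwise the dummy placeholders imported
# in the except-branch raise an informative error at instantiation time.
#
#   from diffusers import VersatileDiffusionPipeline
#   pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")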
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """A Pangram String contains all the alphabets at least once."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
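
# Quick agreement check added for illustration (not in the original file):
def _demo_pangram() -> None:
    sentence = "Pack my box with five dozen liquor jugs"
    assert is_pangram(sentence)
    assert is_pangram_faster(sentence)
    assert is_pangram_fastest(sentence)
    assert not is_pangram("hello world")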
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_DESCRIPTION = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_KWARGS_DESCRIPTION = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes F1 score and Exact Match for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(entropy_eval_args)
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
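
# Worked example for trim_batch (comment only, illustrative):
#   ids = torch.tensor([[5, 7, 0, 0], [3, 0, 0, 0]])
#   trim_batch(ids, pad_token_id=0)  # -> tensor([[5, 7], [3, 0]])
# Columns 2 and 3 are dropped because every row holds the pad token there.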
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
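
# Worked example for the token-level F1 above (comment only, illustrative):
#   f1_score("new york city", "york city") -> precision 2/3, recall 1.0,
#   so F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8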
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
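
# Usage sketch (illustrative): IBertConfig plugs into the usual transformers
# config/model workflow, e.g.
#   from transformers import IBertConfig, IBertModel
#   config = IBertConfig(quant_mode=True)
#   model = IBertModel(config)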
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """
    Decorator that applies a registered CpuOffload hook to an arbitrary function rather than `forward`. This is
    useful for cases where a module exposes entry points other than `forward` that should trigger a move of the
    offloaded weights onto the execution device.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
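
# Usage sketch (hypothetical class, for illustration only): any method wrapped
# with apply_forward_hook first runs the accelerate pre-forward hook, which can
# move offloaded weights onto the right device, before executing.
#
#   class MyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return self.encoder(x)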
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""
    Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths from
    HTML strings.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
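
# Usage sketch (illustrative input/output):
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello</p></body></html>")
#   encoding["nodes"]   # -> [["Hello"]]
#   encoding["xpaths"]  # -> [["/html/body/p"]]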
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    """Create a random 0/1 attention mask of the given shape."""
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
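# Illustrative sketch (assumption, not part of the original file) of how the two
# helpers above build random test inputs:
#   input_ids = ids_tensor([2, 5], vocab_size=99)   # int32 array of shape (2, 5), values in [0, 98]
#   attn_mask = random_attention_mask([2, 5])       # 0/1 mask whose last column is forced to 1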
@require_flax
class UpperCAmelCase_ :
'''simple docstring'''
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        """simple docstring"""
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['''input_ids'''].shape[-1] // 2
        input_ids = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = self._get_input_ids_and_config()
UpperCamelCase : List[str] = False
UpperCamelCase : int = max_length
UpperCamelCase : str = 0
for model_class in self.all_generative_model_classes:
UpperCamelCase : str = model_class(_lowerCamelCase )
UpperCamelCase : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase : List[Any] = pt_model_class(_lowerCamelCase ).eval()
UpperCamelCase : int = load_flax_weights_in_pytorch_model(_lowerCamelCase , flax_model.params )
UpperCamelCase : Any = flax_model.generate(_lowerCamelCase ).sequences
UpperCamelCase : List[Any] = pt_model.generate(torch.tensor(_lowerCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCamelCase : Dict = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = self._get_input_ids_and_config()
UpperCamelCase : Dict = False
UpperCamelCase : List[Any] = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase : int = model_class(_lowerCamelCase )
UpperCamelCase : Any = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
UpperCamelCase : Union[str, Any] = jit(model.generate )
UpperCamelCase : Optional[Any] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = self._get_input_ids_and_config()
UpperCamelCase : Optional[int] = True
UpperCamelCase : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase : List[str] = model_class(_lowerCamelCase )
UpperCamelCase : Dict = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
UpperCamelCase : List[Any] = jit(model.generate )
UpperCamelCase : str = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = self._get_input_ids_and_config()
UpperCamelCase : Tuple = False
UpperCamelCase : Optional[int] = max_length
UpperCamelCase : Optional[Any] = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase : Optional[Any] = model_class(_lowerCamelCase )
UpperCamelCase : List[Any] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
UpperCamelCase : str = jit(model.generate )
UpperCamelCase : Tuple = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = self._get_input_ids_and_config()
UpperCamelCase : str = False
UpperCamelCase : int = max_length
UpperCamelCase : Optional[Any] = 2
UpperCamelCase : Any = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase : Optional[Any] = model_class(_lowerCamelCase )
UpperCamelCase : Optional[int] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = self._get_input_ids_and_config()
UpperCamelCase : List[Any] = True
UpperCamelCase : Dict = max_length
UpperCamelCase : Optional[Any] = 0.8
UpperCamelCase : Any = 10
UpperCamelCase : Optional[int] = 0.3
UpperCamelCase : str = 1
UpperCamelCase : List[Any] = 8
UpperCamelCase : Optional[Any] = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase : List[str] = model_class(_lowerCamelCase )
UpperCamelCase : int = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
UpperCamelCase : int = jit(model.generate )
UpperCamelCase : Tuple = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = self._get_input_ids_and_config()
UpperCamelCase : int = max_length
UpperCamelCase : Union[str, Any] = 1
UpperCamelCase : List[str] = 8
UpperCamelCase : Optional[int] = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase : Optional[Any] = model_class(_lowerCamelCase )
UpperCamelCase : List[str] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
UpperCamelCase : Optional[int] = jit(model.generate )
UpperCamelCase : Dict = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = self._get_input_ids_and_config()
UpperCamelCase : Tuple = max_length
UpperCamelCase : List[Any] = 2
UpperCamelCase : List[Any] = 1
UpperCamelCase : Optional[Any] = 8
UpperCamelCase : Dict = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase : Optional[int] = model_class(_lowerCamelCase )
UpperCamelCase : Union[str, Any] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
UpperCamelCase : str = jit(model.generate )
UpperCamelCase : Optional[Any] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase : Tuple = False
UpperCamelCase : List[Any] = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase : str = model_class(_lowerCamelCase )
UpperCamelCase : Optional[int] = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
UpperCamelCase : Any = jit(model.generate )
UpperCamelCase : Tuple = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase : Dict = True
UpperCamelCase : Tuple = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase : Union[str, Any] = model_class(_lowerCamelCase )
UpperCamelCase : int = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
UpperCamelCase : Dict = jit(model.generate )
UpperCamelCase : List[str] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase : Dict = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase : Any = 2
UpperCamelCase : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase : Union[str, Any] = model_class(_lowerCamelCase )
UpperCamelCase : Optional[Any] = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
UpperCamelCase : Optional[int] = jit(model.generate )
UpperCamelCase : str = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
UpperCamelCase : Optional[int] = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCamelCase : Dict = '''Hello world'''
UpperCamelCase : Any = tokenizer(_lowerCamelCase , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_lowerCamelCase , '''do_samples''' ):
model.generate(_lowerCamelCase , do_samples=_lowerCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_lowerCamelCase , '''foo''' ):
UpperCamelCase : str = {'''foo''': '''bar'''}
model.generate(_lowerCamelCase , **_lowerCamelCase )
| 368
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(examples: List[str], out_file: str, model_name: str, batch_size: int = 8, device: str = DEFAULT_DEVICE, fp16=False, task='''summarization''', prefix=None, **generate_kwargs) -> Dict:
    """simple docstring"""
    fout = Path(out_file).open('''w''', encoding='''utf-8''')
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, '''prefix''', '''''') or ''''''
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors='''pt''', truncation=True, padding='''longest''').to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs)
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''')
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate(verbose=True):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''')
    parser.add_argument('''input_path''', type=str, help='''like cnn_dm/test.source''')
    parser.add_argument('''save_path''', type=str, help='''where to save summaries''')
    parser.add_argument('''--reference_path''', type=str, required=False, help='''like cnn_dm/test.target''')
    parser.add_argument('''--score_path''', type=str, required=False, default='''metrics.json''', help='''where to save metrics''')
    parser.add_argument('''--device''', type=str, required=False, default=DEFAULT_DEVICE, help='''cuda, cuda:1, cpu etc.''')
    parser.add_argument(
        '''--prefix''', type=str, required=False, default=None, help='''will be added to the beginning of src examples''')
    parser.add_argument('''--task''', type=str, default='''summarization''', help='''used for task_specific_params + metrics''')
    parser.add_argument('''--bs''', type=int, default=8, required=False, help='''batch size''')
    parser.add_argument(
        '''--n_obs''', type=int, default=-1, required=False, help='''How many observations. Defaults to all.''')
    parser.add_argument('''--fp16''', action='''store_true''')
    parser.add_argument('''--dump-args''', action='''store_true''', help='''print the custom hparams with the results''')
    parser.add_argument(
        '''--info''', nargs='''?''', type=str, const=datetime_now(), help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(F"""parsed the following generate kwargs: {parsed_args}""")
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''')
    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args, )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores['''info'''] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, '''w'''))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 315
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : Any = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = "decision_transformer"
__UpperCamelCase : Optional[Any] = ["past_key_values"]
__UpperCamelCase : Any = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4_096 , action_tanh=True , vocab_size=1 , n_positions=1_024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        """simple docstring"""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
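# Minimal instantiation sketch (illustrative; assumes the class above is exported
# as DecisionTransformerConfig, which this obfuscated listing does not confirm):
#   config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
#   config.model_type   # "decision_transformer"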
| 369
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        images = kwargs.pop('''images''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        """simple docstring"""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call).''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson( self , tokens , is_inner_value=False , added_vocab=None ):
        """Convert a Donut-style token sequence into a (possibly nested) JSON dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(R'''<s_(.*?)>''' , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(Rf"""</s_{key}>""" , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , '''''' )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R'''<sep/>''' ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=is_inner_value , added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
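    # Illustrative example (not from the original file) of the token-to-JSON
    # conversion above, assuming `processor` is an instance of this class:
    #   processor.tokenajson("<s_menu><s_name>Pizza</s_name></s_menu>")
    #   # -> {'menu': {'name': 'Pizza'}}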
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 315
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTOTUNE = tf.data.AUTOTUNE
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
    parser.add_argument(
        '''--pretrained_model_config''' , type=str , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
    parser.add_argument(
        '''--tokenizer''' , type=str , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
    parser.add_argument(
        '''--per_replica_batch_size''' , type=int , default=8 , help='''Batch size per TPU core.''' , )
    parser.add_argument(
        '''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
    parser.add_argument(
        '''--tpu_name''' , type=str , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
    parser.add_argument(
        '''--tpu_zone''' , type=str , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
    parser.add_argument(
        '''--gcp_project''' , type=str , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
    parser.add_argument(
        '''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
    parser.add_argument(
        '''--train_dataset''' , type=str , help='''Path to training dataset to load. If the path begins with `gs://`'''
        ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
    parser.add_argument(
        '''--shuffle_buffer_size''' , type=int , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
    parser.add_argument(
        '''--eval_dataset''' , type=str , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
        ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=1 , help='''Number of epochs to train for.''' , )
    parser.add_argument(
        '''--learning_rate''' , type=float , default=1e-4 , help='''Learning rate to use for training.''' , )
    parser.add_argument(
        '''--weight_decay_rate''' , type=float , default=1e-3 , help='''Weight decay rate to use for training.''' , )
    parser.add_argument(
        '''--max_length''' , type=int , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
    parser.add_argument(
        '''--mlm_probability''' , type=float , default=0.15 , help='''Fraction of tokens to mask during training.''' , )
    parser.add_argument('''--output_dir''' , type=str , required=True , help='''Path to save model checkpoints to.''' )
    parser.add_argument('''--hub_model_id''' , type=str , help='''Model ID to upload to on the Hugging Face Hub.''' )
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    """simple docstring"""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            '''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
            '''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples(file_list):
    """simple docstring"""
    num_samples = 0
    for file in file_list:
        filename = file.split('''/''' )[-1]
        sample_count = re.search(R'''-\d+-(\d+)\.tfrecord''' , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
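# Filename-parsing sketch for the regex above (the shard name is hypothetical):
#   re.search(r"-\d+-(\d+)\.tfrecord", "train-000-128.tfrecord").group(1)  # -> "128"
# i.e. each shard encodes its own sample count as the final number in its name.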
def prepare_dataset(records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None):
    """simple docstring"""
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTOTUNE )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTOTUNE )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTOTUNE )
    dataset = dataset.prefetch(AUTOTUNE )
    return dataset
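# Pipeline-order sketch for prepare_dataset (matches the code above): file-level
# shuffle -> TFRecordDataset -> assert_cardinality -> decode -> sample-level
# shuffle -> batch -> mask -> prefetch. Illustrative call, with the nested
# helpers from main() below assumed in scope:
#   train_ds = prepare_dataset(training_records, decode_fn=decode_fn,
#                              mask_fn=mask_with_collator, batch_size=64,
#                              shuffle=True, shuffle_buffer_size=2**18)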
def main(args):
    """simple docstring"""
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
    if not training_records:
        raise ValueError(F"""No .tfrecord files found in {args.train_dataset}.""" )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
    if not eval_records:
        raise ValueError(F"""No .tfrecord files found in {args.eval_dataset}.""" )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=['''accuracy'''] )
    def decode_fn(example ):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='''tf''' )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['''attention_mask'''] , tf.bool )
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"] , batch["labels"] = data_collator.tf_mask_tokens(
            batch['''input_ids'''] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 370
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[str] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : str = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371
|
def solution(length: int = 50) -> int:
    """Count the arrangements of blocks (minimum length 3) on a row of `length` units."""
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
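# Worked example: this recurrence matches Project Euler problem 114 (red blocks of
# length >= 3 separated by at least one empty square). For instance, solution(4) == 4
# (all-empty, a 3-block at either end, or one 4-block), and solution(7) == 17.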
| 315
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( ProcessorMixin):
    '''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
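# Hedged usage sketch (the concrete class and checkpoint names are assumptions,
# inferred from the "CLIPImageProcessor"/"CLIPTokenizer" attributes above):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   sorted(inputs.keys())   # ['attention_mask', 'input_ids', 'pixel_values']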
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_ ( Trainer):
'''simple docstring'''
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix = "eval" ):
        """simple docstring"""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" ):
        """simple docstring"""
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , '''predict''' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
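# Usage sketch (illustrative; this reads like a question-answering Trainer, but
# the subclass name is not visible in the obfuscated listing):
#   trainer = TrainerSubclassAbove(model=model, args=training_args,
#       eval_dataset=eval_dataset, eval_examples=eval_examples,
#       post_process_function=post_processing_function)
#   metrics = trainer.evaluate()   # metric keys arrive prefixed with "eval_"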
| 315
| 0
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDEN_SIZE_MAPPING = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def convert_state_dict(state_dict):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith('''emb.''' ):
            name = name.replace('''emb.''' , '''embeddings.''' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('''blocks.0.ln0''' ):
            name = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
        # att -> attention
        name = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , name )
        # ffn -> feed_forward
        name = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , name )
        # time_mix_k -> time_mix_key
        if name.endswith('''.time_mix_k''' ):
            name = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
        # time_mix_v -> time_mix_value
        if name.endswith('''.time_mix_v''' ):
            name = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
        # time_mix_r -> time_mix_receptance
        if name.endswith('''.time_mix_r''' ):
            name = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
        if name != "head.weight":
            name = '''rwkv.''' + name
        state_dict[name] = weight
    return state_dict
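# Renaming sketch for a single key, following the rules above (the key itself is
# hypothetical):
#   'blocks.0.att.time_mix_k'  ->  'rwkv.blocks.0.attention.time_mix_key'
# while 'head.weight' is the one key that keeps its name un-prefixed.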
def convert_rmkv_checkpoint_to_hf_format(repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None):
    """simple docstring"""
    if tokenizer_file is None:
        print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
    if size not in possible_sizes:
        raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location='''cpu''' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(index_file , '''w''' , encoding='''utf-8''' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        '''Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.''' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='''2GB''' )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 351
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the canonical (sorted) letter signature of a word."""
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    """Return every word from the word list sharing `my_word`'s signature."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
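# Example of the two helpers above (the anagram list depends on the contents of
# words.txt, so the output shown is illustrative):
#   signature("python")   # -> 'hnopty'
#   anagram("post")       # -> e.g. ['opts', 'post', 'pots', 'spot', 'stop', 'tops']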
| 315
| 0
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class UpperCAmelCase_ :
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase : Dict = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : List[Any] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCamelCase : Union[str, Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase : List[Any] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.get_dummy_components()
UpperCamelCase : Any = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = inputs['''prompt''']
UpperCamelCase : Optional[Any] = inputs['''generator''']
UpperCamelCase : int = inputs['''num_inference_steps''']
UpperCamelCase : Union[str, Any] = inputs['''output_type''']
if "image" in inputs:
UpperCamelCase : Tuple = inputs['''image''']
else:
UpperCamelCase : Dict = None
if "mask_image" in inputs:
UpperCamelCase : Optional[Any] = inputs['''mask_image''']
else:
UpperCamelCase : Optional[int] = None
if "original_image" in inputs:
UpperCamelCase : List[Any] = inputs['''original_image''']
else:
UpperCamelCase : str = None
UpperCamelCase , UpperCamelCase : Tuple = pipe.encode_prompt(__SCREAMING_SNAKE_CASE )
# inputs with prompt converted to embeddings
UpperCamelCase : Tuple = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase : Dict = image
if mask_image is not None:
UpperCamelCase : Tuple = mask_image
if original_image is not None:
UpperCamelCase : Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = pipe(**__SCREAMING_SNAKE_CASE )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = self.pipeline_class.from_pretrained(__SCREAMING_SNAKE_CASE )
pipe_loaded.to(__SCREAMING_SNAKE_CASE )
pipe_loaded.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCamelCase : Dict = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = inputs['''generator''']
UpperCamelCase : List[str] = inputs['''num_inference_steps''']
UpperCamelCase : str = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCamelCase : int = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase : Tuple = image
if mask_image is not None:
UpperCamelCase : Optional[int] = mask_image
if original_image is not None:
UpperCamelCase : List[Any] = original_image
UpperCamelCase : Any = pipe_loaded(**__SCREAMING_SNAKE_CASE )[0]
UpperCamelCase : str = np.abs(to_np(__SCREAMING_SNAKE_CASE ) - to_np(__SCREAMING_SNAKE_CASE ) ).max()
self.assertLess(__SCREAMING_SNAKE_CASE , 1e-4 )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.get_dummy_components()
UpperCamelCase : Dict = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = pipe(**__SCREAMING_SNAKE_CASE )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = self.pipeline_class.from_pretrained(__SCREAMING_SNAKE_CASE )
pipe_loaded.to(__SCREAMING_SNAKE_CASE )
pipe_loaded.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCamelCase : Optional[int] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = pipe_loaded(**__SCREAMING_SNAKE_CASE )[0]
UpperCamelCase : Dict = np.abs(to_np(__SCREAMING_SNAKE_CASE ) - to_np(__SCREAMING_SNAKE_CASE ) ).max()
self.assertLess(__SCREAMING_SNAKE_CASE , 1e-4 )
| 352
|
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] ):
"""simple docstring"""
UpperCamelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(SCREAMING_SNAKE_CASE_ ):
if len(SCREAMING_SNAKE_CASE_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(SCREAMING_SNAKE_CASE_ ) )
return data_lists
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] , SCREAMING_SNAKE_CASE_ : list[int] ):
"""simple docstring"""
UpperCamelCase : list[list[float]] = []
for dlist, weight in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = max(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
UpperCamelCase : Dict = F"""Invalid weight of {weight:f} provided"""
raise ValueError(SCREAMING_SNAKE_CASE_ )
score_lists.append(SCREAMING_SNAKE_CASE_ )
return score_lists
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] ):
"""simple docstring"""
UpperCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = final_scores[j] + ele
return final_scores
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] , SCREAMING_SNAKE_CASE_ : list[int] ):
"""simple docstring"""
UpperCamelCase : str = get_data(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = calculate_each_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = generate_final_scores(SCREAMING_SNAKE_CASE_ )
# append scores to source data
for i, ele in enumerate(SCREAMING_SNAKE_CASE_ ):
source_data[i].append(SCREAMING_SNAKE_CASE_ )
return source_data
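# Minimal usage sketch with illustrative numbers (not from any real dataset):
# rows = [[20.0, 60.0], [10.0, 70.0]] with weights = [0, 1] means column 0 is
# minimized (score = 1 - normalized value) and column 1 is maximized; the last
# function above then appends the summed per-column scores to each input row.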
| 315
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = pd.read_csv("sample_data.csv", header=None)
__UpperCAmelCase : Optional[int] = df.shape[:1][0]
# If you're using some other dataset input the target column
__UpperCAmelCase : str = df.iloc[:, 1:2]
__UpperCAmelCase : List[str] = actual_data.values.reshape(len_data, 1)
__UpperCAmelCase : Dict = MinMaxScaler().fit_transform(actual_data)
__UpperCAmelCase : Optional[Any] = 10
__UpperCAmelCase : List[Any] = 5
__UpperCAmelCase : List[Any] = 20
__UpperCAmelCase : Any = len_data - periods * look_back
__UpperCAmelCase : Union[str, Any] = actual_data[:division]
__UpperCAmelCase : str = actual_data[division - look_back :]
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = [], []
__UpperCAmelCase , __UpperCAmelCase : Dict = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__UpperCAmelCase : Dict = np.array(train_x)
__UpperCAmelCase : Tuple = np.array(test_x)
__UpperCAmelCase : List[Any] = np.array([list(i.ravel()) for i in train_y])
__UpperCAmelCase : Any = np.array([list(i.ravel()) for i in test_y])
__UpperCAmelCase : List[Any] = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
__UpperCAmelCase : Dict = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
__UpperCAmelCase : Tuple = model.predict(x_test)
| 353
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Union[str, Any] = ""
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def a ( ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : List[Any] = get_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print('''Processing...''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = update_image_and_anno(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for index, image in enumerate(SCREAMING_SNAKE_CASE_ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCamelCase : Optional[int] = random_chars(3_2 )
UpperCamelCase : List[Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
UpperCamelCase : int = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , SCREAMING_SNAKE_CASE_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(F"""Success {index+1}/{len(SCREAMING_SNAKE_CASE_ )} with {file_name}""" )
UpperCamelCase : Any = []
for anno in new_annos[index]:
UpperCamelCase : Tuple = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(SCREAMING_SNAKE_CASE_ )
with open(F"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Any = []
UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , '''*.txt''' ) ):
UpperCamelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(SCREAMING_SNAKE_CASE_ ) as in_file:
UpperCamelCase : List[str] = in_file.readlines()
UpperCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{label_name}.jpg""" )
UpperCamelCase : Union[str, Any] = []
for obj_list in obj_lists:
UpperCamelCase : str = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE_ )
labels.append(SCREAMING_SNAKE_CASE_ )
return img_paths, labels
def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int = 1 ):
"""simple docstring"""
UpperCamelCase : List[Any] = []
UpperCamelCase : str = []
UpperCamelCase : int = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase : Tuple = []
UpperCamelCase : Optional[int] = img_list[idx]
path_list.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = anno_list[idx]
UpperCamelCase : Optional[Any] = cva.imread(SCREAMING_SNAKE_CASE_ )
if flip_type == 1:
UpperCamelCase : Optional[Any] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for bbox in img_annos:
UpperCamelCase : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCamelCase : List[str] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for bbox in img_annos:
UpperCamelCase : Union[str, Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(SCREAMING_SNAKE_CASE_ )
new_imgs_list.append(SCREAMING_SNAKE_CASE_ )
return new_imgs_list, new_annos_lists, path_list
def a ( SCREAMING_SNAKE_CASE_ : int = 3_2 ):
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
UpperCamelCase : Any = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 315
| 0
|
def a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
return int(input_a == input_a == 0 )
def a ( ):
"""simple docstring"""
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 354
|
import qiskit
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : List[str] = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCamelCase : Any = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
UpperCamelCase : Any = qiskit.execute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(SCREAMING_SNAKE_CASE_ )
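# For bita = bita = 1 the XOR qubit measures 0 and the AND qubit measures 1, so
# the simulator should return counts of {'10': 1000} (qiskit orders the
# classical bits as c1 c0 in the result string).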
if __name__ == "__main__":
__UpperCAmelCase : int = half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
| 315
| 0
|
from __future__ import annotations
def a ( SCREAMING_SNAKE_CASE_ : list[int | str] ):
create_state_space_tree(_A , [] , 0 , [0 for i in range(len(_A ) )] )
def a ( SCREAMING_SNAKE_CASE_ : list[int | str] , SCREAMING_SNAKE_CASE_ : list[int | str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , ):
if index == len(_A ):
print(_A )
return
for i in range(len(_A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase : List[str] = True
create_state_space_tree(_A , _A , index + 1 , _A )
current_sequence.pop()
UpperCamelCase : Dict = False
__UpperCAmelCase : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__UpperCAmelCase : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
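# The state-space tree prints all 4! = 24 orderings of [3, 1, 2, 4] followed by
# the 3! = 6 orderings of ["A", "B", "C"]; unused indices are tried left to
# right, so the first line printed is [3, 1, 2, 4] itself.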
| 355
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() )
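# Note: both inputs are L2-normalized before the matrix product, so the result
# is a cosine-similarity table whose entry (i, j) compares image embedding i
# with text/concept embedding j.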
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = CLIPConfig
__UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"]
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = CLIPVisionModel(config.vision_config )
UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy()
UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy()
UpperCamelCase : Dict = []
UpperCamelCase : List[str] = image_embeds.shape[0]
for i in range(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase : Optional[int] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
UpperCamelCase : List[str] = special_cos_dist[i][concept_idx]
UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item()
UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
UpperCamelCase : Optional[int] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
UpperCamelCase : Optional[int] = cos_dist[i][concept_idx]
UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item()
UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds )
UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase : Union[str, Any] = 0.0
UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
UpperCamelCase : int = special_care * 0.01
UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 315
| 0
|
__UpperCAmelCase : str = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
__UpperCAmelCase : Optional[int] = {value: key for key, value in encode_dict.items()}
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
UpperCamelCase : Dict = ''
for word in coded.split():
while len(SCREAMING_SNAKE_CASE_ ) != 0:
decoded += decode_dict[word[:5]]
UpperCamelCase : Union[str, Any] = word[5:]
decoded += " "
return decoded.strip()
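# Worked example: encode("ab") yields "AAAAAAAAAB" (two fixed 5-symbol blocks),
# and decode("AAAAA AAAAB") yields "a b", since spaces pass through unencoded
# and every 5-character A/B chunk maps back to one letter.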
if __name__ == "__main__":
from doctest import testmod
testmod()
| 356
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def a ( ):
"""simple docstring"""
UpperCamelCase : Any = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
UpperCamelCase : Tuple = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
# Let's go
UpperCamelCase : List[Any] = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
UpperCamelCase : str = args.func(SCREAMING_SNAKE_CASE_ )
service.run()
if __name__ == "__main__":
main()
| 315
| 0
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__UpperCAmelCase : Dict = True
except ImportError:
__UpperCAmelCase : str = False
try:
from torch.hub import _get_torch_home
__UpperCAmelCase : Dict = _get_torch_home()
except ImportError:
__UpperCAmelCase : Union[str, Any] = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
__UpperCAmelCase : Optional[int] = os.path.join(torch_cache_home, "transformers")
__UpperCAmelCase : Optional[int] = "https://cdn.huggingface.co"
__UpperCAmelCase : str = "https://s3.amazonaws.com/models.huggingface.co/bert"
__UpperCAmelCase : List[str] = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
__UpperCAmelCase : int = os.path.join(PATH, "config.yaml")
__UpperCAmelCase : Optional[Any] = os.path.join(PATH, "attributes.txt")
__UpperCAmelCase : Optional[int] = os.path.join(PATH, "objects.txt")
__UpperCAmelCase : str = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
__UpperCAmelCase : Optional[Any] = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
__UpperCAmelCase : List[str] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
__UpperCAmelCase : int = "pytorch_model.bin"
__UpperCAmelCase : Union[str, Any] = "config.yaml"
def a ( SCREAMING_SNAKE_CASE_ : List[str]=OBJECTS , SCREAMING_SNAKE_CASE_ : Tuple=ATTRIBUTES ):
"""simple docstring"""
UpperCamelCase : Optional[int] = []
with open(SCREAMING_SNAKE_CASE_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
UpperCamelCase : List[Any] = []
with open(SCREAMING_SNAKE_CASE_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : int = OrderedDict()
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
UpperCamelCase : Tuple = pkl.load(SCREAMING_SNAKE_CASE_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
UpperCamelCase : Optional[int] = ckp.pop(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
UpperCamelCase : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE_ )
else:
assert isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ), type(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = v
return r
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : Tuple = {}
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "root" , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
UpperCamelCase : Tuple = name
UpperCamelCase : Union[str, Any] = level
UpperCamelCase : Dict = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
UpperCamelCase : Optional[int] = copy.deepcopy(lowerCamelCase__ )
UpperCamelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase : Union[str, Any] = Config(lowerCamelCase__ , name=lowerCamelCase__ , level=level + 1 )
UpperCamelCase : int = v
setattr(self , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase : Tuple = d
def __repr__( self ):
"""simple docstring"""
return str(list((self._pointer.keys()) ) )
def __setattr__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Dict = val
UpperCamelCase : Dict = val
UpperCamelCase : Dict = key.split('''.''' )
UpperCamelCase : int = len(lowerCamelCase__ ) - 1
UpperCamelCase : Dict = self._pointer
if len(lowerCamelCase__ ) > 1:
for i, l in enumerate(lowerCamelCase__ ):
if hasattr(self , lowerCamelCase__ ) and isinstance(getattr(self , lowerCamelCase__ ) , lowerCamelCase__ ):
setattr(getattr(self , lowerCamelCase__ ) , '''.'''.join(levels[i:] ) , lowerCamelCase__ )
if l == last_level:
UpperCamelCase : Dict = val
else:
UpperCamelCase : List[str] = pointer[l]
def _lowercase ( self ):
"""simple docstring"""
return self._pointer
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(f"""{file_name}""" , '''w''' ) as stream:
dump(lowerCamelCase__ , lowerCamelCase__ )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(f"""{file_name}""" , '''w''' ) as stream:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(lowerCamelCase__ ) as stream:
UpperCamelCase : int = load(lowerCamelCase__ , Loader=lowerCamelCase__ )
return data
def __str__( self ):
"""simple docstring"""
UpperCamelCase : str = ''' '''
if self._name != "root":
UpperCamelCase : Dict = f"""{t * (self._level-1)}{self._name}:\n"""
else:
UpperCamelCase : Dict = ''''''
UpperCamelCase : int = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(lowerCamelCase__ ).__name__})\n"""
UpperCamelCase : Optional[Any] = level
return r[:-1]
@classmethod
def _lowercase ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = cls.get_config_dict(lowerCamelCase__ , **lowerCamelCase__ )
return cls(lowerCamelCase__ )
@classmethod
def _lowercase ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = kwargs.pop('''cache_dir''' , lowerCamelCase__ )
UpperCamelCase : List[str] = kwargs.pop('''force_download''' , lowerCamelCase__ )
UpperCamelCase : Optional[Any] = kwargs.pop('''resume_download''' , lowerCamelCase__ )
UpperCamelCase : int = kwargs.pop('''proxies''' , lowerCamelCase__ )
UpperCamelCase : Tuple = kwargs.pop('''local_files_only''' , lowerCamelCase__ )
if os.path.isdir(lowerCamelCase__ ):
UpperCamelCase : List[str] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
elif os.path.isfile(lowerCamelCase__ ) or is_remote_url(lowerCamelCase__ ):
UpperCamelCase : Dict = pretrained_model_name_or_path
else:
UpperCamelCase : Optional[int] = hf_bucket_url(lowerCamelCase__ , filename=lowerCamelCase__ , use_cdn=lowerCamelCase__ )
try:
# Load from URL or cache if already cached
UpperCamelCase : List[str] = cached_path(
lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , proxies=lowerCamelCase__ , resume_download=lowerCamelCase__ , local_files_only=lowerCamelCase__ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
UpperCamelCase : Dict = Config.load_yaml(lowerCamelCase__ )
except EnvironmentError:
UpperCamelCase : Optional[Any] = '''Can\'t load config for'''
raise EnvironmentError(lowerCamelCase__ )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(lowerCamelCase__ ), kwargs
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = torch.load('''dump.pt''' , map_location=in_tensor.device )
UpperCamelCase : List[Any] = in_tensor.numpy()
UpperCamelCase : List[Any] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=0.01 , atol=0.1 ), (
F"""{sum([1 for x in np.isclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_0_0:.4f} %"""
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = urlparse(SCREAMING_SNAKE_CASE_ )
return parsed.scheme in ("http", "https")
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True ):
"""simple docstring"""
UpperCamelCase : Any = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
UpperCamelCase : int = '''/''' not in model_id
if legacy_format:
return F"""{endpoint}/{model_id}-{filename}"""
else:
return F"""{endpoint}/{model_id}/{filename}"""
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Tuple=0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
ua += "; " + "; ".join('''{}/{}'''.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for k, v in user_agent.items() )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
ua += "; " + user_agent
UpperCamelCase : List[str] = {'''user-agent''': ua}
if resume_size > 0:
UpperCamelCase : Any = '''bytes=%d-''' % (resume_size,)
UpperCamelCase : List[str] = requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ )
if response.status_code == 4_1_6: # Range not satisfiable
return
UpperCamelCase : Tuple = response.headers.get('''Content-Length''' )
UpperCamelCase : Any = resume_size + int(SCREAMING_SNAKE_CASE_ ) if content_length is not None else None
UpperCamelCase : int = tqdm(
unit='''B''' , unit_scale=SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , initial=SCREAMING_SNAKE_CASE_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1_0_2_4 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(SCREAMING_SNAKE_CASE_ ) )
temp_file.write(SCREAMING_SNAKE_CASE_ )
progress.close()
def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : int=1_0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : int=False , ):
"""simple docstring"""
if cache_dir is None:
UpperCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : int = str(SCREAMING_SNAKE_CASE_ )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = None
if not local_files_only:
try:
UpperCamelCase : List[str] = requests.head(SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , timeout=SCREAMING_SNAKE_CASE_ )
if response.status_code == 2_0_0:
UpperCamelCase : Tuple = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
UpperCamelCase : int = url_to_filename(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# get cache path to put the file
UpperCamelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
return cache_path
else:
UpperCamelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(SCREAMING_SNAKE_CASE_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
return os.path.join(SCREAMING_SNAKE_CASE_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
UpperCamelCase : int = cache_path + '''.lock'''
with FileLock(SCREAMING_SNAKE_CASE_ ):
# If the download just completed while the lock was activated.
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
UpperCamelCase : Any = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(SCREAMING_SNAKE_CASE_ , '''a+b''' ) as f:
yield f
UpperCamelCase : List[str] = _resumable_file_manager
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Dict = os.stat(SCREAMING_SNAKE_CASE_ ).st_size
else:
UpperCamelCase : Dict = 0
else:
UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile , dir=SCREAMING_SNAKE_CASE_ , delete=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , SCREAMING_SNAKE_CASE_ , temp_file.name , )
http_get(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_size=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , )
os.replace(temp_file.name , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = {'''url''': url, '''etag''': etag}
UpperCamelCase : Dict = cache_path + '''.json'''
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as meta_file:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return cache_path
def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int=None ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = url.encode('''utf-8''' )
UpperCamelCase : Tuple = shaaaa(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = url_hash.hexdigest()
if etag:
UpperCamelCase : Optional[Any] = etag.encode('''utf-8''' )
UpperCamelCase : str = shaaaa(SCREAMING_SNAKE_CASE_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
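# Cache-naming sketch: the filename is sha256(url).hexdigest(), plus
# "." + sha256(etag).hexdigest() when an ETag is available, and a trailing
# ".h5" is kept so downstream loaders can still recognize HDF5 weights.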
def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : List[str]=False , ):
"""simple docstring"""
if cache_dir is None:
UpperCamelCase : Tuple = TRANSFORMERS_CACHE
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = str(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : int = str(SCREAMING_SNAKE_CASE_ )
if is_remote_url(SCREAMING_SNAKE_CASE_ ):
# URL, so get it from the cache (downloading if necessary)
UpperCamelCase : Optional[int] = get_from_cache(
SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , )
elif os.path.exists(SCREAMING_SNAKE_CASE_ ):
# File, and it exists.
UpperCamelCase : Optional[Any] = url_or_filename
elif urlparse(SCREAMING_SNAKE_CASE_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(SCREAMING_SNAKE_CASE_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(SCREAMING_SNAKE_CASE_ ) )
if extract_compressed_file:
if not is_zipfile(SCREAMING_SNAKE_CASE_ ) and not tarfile.is_tarfile(SCREAMING_SNAKE_CASE_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
UpperCamelCase : Union[str, Any] = os.path.split(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
UpperCamelCase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if os.path.isdir(SCREAMING_SNAKE_CASE_ ) and os.listdir(SCREAMING_SNAKE_CASE_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
UpperCamelCase : Union[str, Any] = output_path + '''.lock'''
with FileLock(SCREAMING_SNAKE_CASE_ ):
shutil.rmtree(SCREAMING_SNAKE_CASE_ , ignore_errors=SCREAMING_SNAKE_CASE_ )
os.makedirs(SCREAMING_SNAKE_CASE_ )
if is_zipfile(SCREAMING_SNAKE_CASE_ ):
with ZipFile(SCREAMING_SNAKE_CASE_ , '''r''' ) as zip_file:
zip_file.extractall(SCREAMING_SNAKE_CASE_ )
zip_file.close()
elif tarfile.is_tarfile(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Dict = tarfile.open(SCREAMING_SNAKE_CASE_ )
tar_file.extractall(SCREAMING_SNAKE_CASE_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(SCREAMING_SNAKE_CASE_ ) )
return output_path_extracted
return output_path
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]="," ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
with open(SCREAMING_SNAKE_CASE_ ) as f:
UpperCamelCase : str = eval(f.read() )
else:
UpperCamelCase : List[str] = requests.get(SCREAMING_SNAKE_CASE_ )
try:
UpperCamelCase : str = req.json()
except Exception:
UpperCamelCase : int = req.content.decode()
assert data is not None, "could not connect"
try:
UpperCamelCase : Dict = eval(SCREAMING_SNAKE_CASE_ )
except Exception:
UpperCamelCase : Any = data.split('''\n''' )
req.close()
return data
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : Any = requests.get(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = np.array(Image.open(BytesIO(response.content ) ) )
return img
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as stream:
UpperCamelCase : str = pkl.load(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = weights.pop('''model''' )
UpperCamelCase : Tuple = {}
for k, v in model.items():
UpperCamelCase : Tuple = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
if "running_var" in k:
UpperCamelCase : Optional[int] = torch.tensor([0] )
UpperCamelCase : Optional[int] = k.replace('''running_var''' , '''num_batches_tracked''' )
UpperCamelCase : Union[str, Any] = zero
return new
def a ( ):
"""simple docstring"""
print(F"""{os.path.abspath(os.path.join(SCREAMING_SNAKE_CASE_ , os.pardir ) )}/demo.ipynb""" )
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int="RGB" ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = cva.imread(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : Dict = get_image_from_url(SCREAMING_SNAKE_CASE_ )
assert img is not None, F"""could not connect to: {im}"""
UpperCamelCase : Optional[int] = cva.cvtColor(SCREAMING_SNAKE_CASE_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
UpperCamelCase : Union[str, Any] = img[:, :, ::-1]
return img
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=1 ):
"""simple docstring"""
return (images[i : i + batch] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ))
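# The generator above lazily yields images[i : i + batch] slices; with batch=2
# it produces images[0:2], images[2:4], and so on, without copying the list.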
| 357
|
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
return "".join(chr(ord(SCREAMING_SNAKE_CASE_ ) - 3_2 ) if '''a''' <= char <= '''z''' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 315
| 0
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]="pt" ):
"""simple docstring"""
UpperCamelCase : List[Any] = {'''add_prefix_space''': True} if isinstance(A__ , A__ ) and not line.startswith(''' ''' ) else {}
UpperCamelCase : List[str] = padding_side
return tokenizer(
[line] , max_length=A__ , padding='''max_length''' if pad_to_max_length else None , truncation=A__ , return_tensors=A__ , add_special_tokens=A__ , **A__ , )
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int=None , ):
"""simple docstring"""
UpperCamelCase : Any = input_ids.ne(A__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
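# Columns where every row equals the pad token are dropped, shrinking the batch
# to the longest real sequence; the attention mask, when given, is sliced with
# the same column mask so the two tensors stay aligned.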
class UpperCAmelCase_ ( lowerCamelCase__):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="train" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="" , ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[str] = Path(lowercase__ ).joinpath(type_path + '''.source''' )
UpperCamelCase : Any = Path(lowercase__ ).joinpath(type_path + '''.target''' )
UpperCamelCase : Optional[int] = self.get_char_lens(self.src_file )
UpperCamelCase : Optional[Any] = max_source_length
UpperCamelCase : List[Any] = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
UpperCamelCase : Tuple = tokenizer
UpperCamelCase : List[Any] = prefix
if n_obs is not None:
UpperCamelCase : List[str] = self.src_lens[:n_obs]
UpperCamelCase : List[Any] = src_lang
UpperCamelCase : Tuple = tgt_lang
def __len__( self ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = index + 1 # linecache starts at 1
UpperCamelCase : str = self.prefix + linecache.getline(str(self.src_file ) , lowercase__ ).rstrip('''\n''' )
UpperCamelCase : Union[str, Any] = linecache.getline(str(self.tgt_file ) , lowercase__ ).rstrip('''\n''' )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowercase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
UpperCamelCase : List[Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowercase__ ) else self.tokenizer
)
UpperCamelCase : Tuple = self.tokenizer.generator if isinstance(self.tokenizer , lowercase__ ) else self.tokenizer
UpperCamelCase : List[Any] = encode_line(lowercase__ , lowercase__ , self.max_source_length , '''right''' )
UpperCamelCase : Tuple = encode_line(lowercase__ , lowercase__ , self.max_target_length , '''right''' )
UpperCamelCase : Union[str, Any] = source_inputs['''input_ids'''].squeeze()
UpperCamelCase : List[str] = target_inputs['''input_ids'''].squeeze()
UpperCamelCase : Tuple = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return [len(lowercase__ ) for x in Path(lowercase__ ).open().readlines()]
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = torch.stack([x['''input_ids'''] for x in batch] )
UpperCamelCase : str = torch.stack([x['''attention_mask'''] for x in batch] )
UpperCamelCase : Tuple = torch.stack([x['''decoder_input_ids'''] for x in batch] )
UpperCamelCase : str = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowercase__ )
else self.tokenizer.pad_token_id
)
UpperCamelCase : Union[str, Any] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowercase__ )
else self.tokenizer.pad_token_id
)
UpperCamelCase : List[str] = trim_batch(lowercase__ , lowercase__ )
UpperCamelCase , UpperCamelCase : Union[str, Any] = trim_batch(lowercase__ , lowercase__ , attention_mask=lowercase__ )
UpperCamelCase : Union[str, Any] = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
__UpperCAmelCase : Optional[int] = getLogger(__name__)
def a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
return list(itertools.chain.from_iterable(A__ ) )
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase : int = get_git_info()
save_json(A__ , os.path.join(A__ , '''git_log.json''' ) )
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str]=4 , **SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
with open(A__ , '''w''' ) as f:
json.dump(A__ , A__ , indent=A__ , **A__ )
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
with open(A__ ) as f:
return json.load(A__ )
def a ( ):
"""simple docstring"""
UpperCamelCase : str = git.Repo(search_parent_directories=A__ )
UpperCamelCase : List[str] = {
'''repo_id''': str(A__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
return list(map(A__ , A__ ) )
def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
with open(A__ , '''wb''' ) as f:
return pickle.dump(A__ , A__ )
def a ( SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
def remove_articles(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , A__ )
def white_space_fix(SCREAMING_SNAKE_CASE_ : Optional[Any] ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE_ : List[Any] ):
UpperCamelCase : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE_ : Any ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(A__ ) ) ) )
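# e.g. normalize_answer("The Cat, sat!") -> "cat sat": lowercase first, strip
# punctuation, remove the articles a/an/the, then collapse whitespace.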
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase : str = normalize_answer(A__ ).split()
UpperCamelCase : Tuple = normalize_answer(A__ ).split()
UpperCamelCase : List[Any] = Counter(A__ ) & Counter(A__ )
UpperCamelCase : Tuple = sum(common.values() )
if num_same == 0:
return 0
UpperCamelCase : int = 1.0 * num_same / len(A__ )
UpperCamelCase : Any = 1.0 * num_same / len(A__ )
UpperCamelCase : List[str] = (2 * precision * recall) / (precision + recall)
return fa
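# Worked example: for hypothesis "the cat sat" and reference "cat sat down",
# normalization leaves 2 and 3 tokens with 2 in common, so precision = 1.0,
# recall = 2/3 and F1 = 0.8.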
def a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
return normalize_answer(A__ ) == normalize_answer(A__ )
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
assert len(A__ ) == len(A__ )
UpperCamelCase : List[Any] = 0
for hypo, pred in zip(A__ , A__ ):
em += exact_match_score(A__ , A__ )
if len(A__ ) > 0:
em /= len(A__ )
return {"em": em}
def a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
return model_prefix.startswith('''rag''' )
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : List[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
UpperCamelCase : int = '''dropout_rate'''
for p in extra_params:
if getattr(A__ , A__ , A__ ):
if not hasattr(A__ , A__ ) and not hasattr(A__ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(A__ ) )
delattr(A__ , A__ )
continue
UpperCamelCase : Tuple = p if hasattr(A__ , A__ ) else equivalent_param[p]
setattr(A__ , A__ , getattr(A__ , A__ ) )
delattr(A__ , A__ )
return hparams, config
| 358
|
import math
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCamelCase : Union[str, Any] = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
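# Trial division only needs odd candidates up to sqrt(n): for number = 97 the
# loop tests 3, 5, 7 and 9, none of which divide it, so 97 is reported prime.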
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=1 , **SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
UpperCamelCase : Tuple = factor * value
UpperCamelCase : Optional[int] = value
while not is_prime(SCREAMING_SNAKE_CASE_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE_ )
return value
| 315
| 0
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : List[Any] = LayoutLMTokenizer
__UpperCamelCase : int = LayoutLMTokenizerFast
__UpperCamelCase : Tuple = True
__UpperCamelCase : int = True
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
UpperCamelCase : List[str] = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = 'UNwant\u00E9d,running'
UpperCamelCase : Optional[int] = 'unwanted, running'
return input_text, output_text
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = self.tokenizer_class(self.vocab_file )
UpperCamelCase : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [7, 4, 5, 10, 8, 9] )
def _lowercase ( self ):
"""simple docstring"""
pass
| 359
|
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
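

# Usage sketch (hypothetical, not part of the original module): the deprecated
# alias behaves exactly like ImageGPTImageProcessor but warns on construction.
#
#     feature_extractor = ImageGPTFeatureExtractor()  # emits a FutureWarning
#     assert isinstance(feature_extractor, ImageGPTImageProcessor)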
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property of a 0-9 pandigital number,
    given as a tuple of its digits."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
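

# A worked check (digits of 1406357289, the example from the problem statement):
# d4=6 is even, d3+d4+d5 = 0+6+3 is divisible by 3, d6=5 by 5, and 357/572/728/289
# are divisible by 7/11/13/17 respectively.
def _demo_example_pandigital() -> None:
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))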
if __name__ == "__main__":
print(f'''{solution() = }''')
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True (the default)
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        expected_words = UpperCamelCase  # alias for the OCR words fixture assigned just above
UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        expected_boxes = UpperCamelCase  # alias for the OCR boxes fixture assigned just above
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
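

def _demo_default_shape() -> None:
    """A small sanity sketch (not part of the original module): with the default
    hyper-parameters above, 4 stages double the channels three times, so
    hidden_size == 64 * 2 ** 3 == 512."""
    config = NatConfig()
    assert config.num_layers == 4
    assert config.hidden_size == 512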
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the dataset dict into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
UpperCamelCase : Dict = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_a )
UpperCamelCase : Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_a ) > 0.5
).float()
UpperCamelCase : str = (torch.rand((self.batch_size, self.num_labels) , device=_a ) > 0.5).long()
UpperCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCamelCase : Dict = self.num_queries
UpperCamelCase : Tuple = self.num_labels
UpperCamelCase : Dict = [1, 1, 1, 1]
UpperCamelCase : Optional[int] = self.num_channels
UpperCamelCase : List[str] = 64
UpperCamelCase : Tuple = 128
UpperCamelCase : List[str] = self.hidden_dim
UpperCamelCase : List[str] = self.hidden_dim
UpperCamelCase : int = self.hidden_dim
return config
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase : Union[str, Any] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = output.encoder_hidden_states
UpperCamelCase : Tuple = output.pixel_decoder_hidden_states
UpperCamelCase : Union[str, Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , config.decoder_layers )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
with torch.no_grad():
UpperCamelCase : Optional[Any] = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
UpperCamelCase : Optional[Any] = model(pixel_values=_a , pixel_mask=_a )
UpperCamelCase : Any = model(_a , output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a , _a )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Dict = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(__SCREAMING_SNAKE_CASE ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase : Optional[int] = model(pixel_values=_a , pixel_mask=_a )
UpperCamelCase : List[Any] = model(_a )
comm_check_on_output(_a )
UpperCamelCase : Optional[Any] = model(
pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = MaskaFormerModelTester(self )
UpperCamelCase : Optional[int] = ConfigTester(self , config_class=_a , has_text_modality=_a )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : str = model_class(_a )
UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCamelCase : Any = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = (self.model_tester.min_size,) * 2
UpperCamelCase : Tuple = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_a ),
"""mask_labels""": torch.randn((2, 10, *size) , device=_a ),
"""class_labels""": torch.zeros(2 , 10 , device=_a ).long(),
}
UpperCamelCase : List[str] = self.model_tester.get_config()
UpperCamelCase : Dict = MaskaFormerForUniversalSegmentation(_a ).to(_a )
UpperCamelCase : Optional[int] = model(**_a )
self.assertTrue(outputs.loss is not None )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = model_class(_a ).to(_a )
UpperCamelCase : int = model(**_a , output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def _lowercase ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCamelCase : Dict = self.all_model_classes[1]
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
UpperCamelCase : int = model_class(_a )
model.to(_a )
model.train()
UpperCamelCase : str = model(_a , mask_labels=_a , class_labels=_a ).loss
loss.backward()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.all_model_classes[1]
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCamelCase : Tuple = True
UpperCamelCase : Union[str, Any] = True
UpperCamelCase : Optional[int] = model_class(_a ).to(_a )
model.train()
UpperCamelCase : Tuple = model(_a , mask_labels=_a , class_labels=_a )
UpperCamelCase : Any = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase : int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase : Tuple = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCAmelCase : str = 1E-4
def a ( ):
"""simple docstring"""
UpperCamelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
UpperCamelCase : Optional[Any] = self.default_image_processor
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : List[Any] = image_processor(_a , return_tensors='''pt''' ).to(_a )
UpperCamelCase : int = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase : str = model(**_a )
UpperCamelCase : int = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
UpperCamelCase : int = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
UpperCamelCase : int = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
UpperCamelCase : List[Any] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : Any = image_processor(_a , return_tensors='''pt''' ).to(_a )
UpperCamelCase : List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(**_a )
# masks_queries_logits
UpperCamelCase : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase : int = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
UpperCamelCase : Tuple = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
UpperCamelCase : int = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase : List[Any] = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
UpperCamelCase : str = self.default_image_processor
UpperCamelCase : int = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
UpperCamelCase : Optional[int] = inputs["""pixel_values"""].to(_a )
UpperCamelCase : int = [el.to(_a ) for el in inputs["""mask_labels"""]]
UpperCamelCase : Union[str, Any] = [el.to(_a ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(**_a )
self.assertTrue(outputs.loss is not None )
__UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase : Union[str, Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
def solution() -> int:
    """Return the product of the digits d1 * d10 * d100 * d1000 * d10000 *
    d100000 * d1000000 of the Champernowne constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
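

def _demo_prefix() -> None:
    """Sanity sketch (not part of the original solution): the concatenation
    starts "12345678910111213...", so the 10th digit (index 9) is 1 and the
    100th digit (index 99, the first digit of 55) is 5."""
    prefix = "".join(str(i) for i in range(1, 70))
    assert prefix.startswith("12345678910111213")
    assert prefix[9] == "1"
    assert prefix[99] == "5"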
if __name__ == "__main__":
print(solution())
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            objects.extend([obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0])
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    objects.extend([obj[1:-1] for obj in imports if len(obj) > 0])
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    objects.extend([obj[1:-1] for obj in imports if len(obj) > 0])
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """
    Check all submodules of Transformers are properly registered in the main init. Raises an error otherwise.
    """
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
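

def _demo_find_backend() -> None:
    """Hypothetical demo (not part of the original checker): a guarded import
    line normalizes to a sorted, underscore-joined backend key."""
    line = "    if not is_torch_available() or not is_vision_available():"
    assert find_backend(line) == "torch_and_vision"
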
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # loss is the mean negative log-likelihood per target token, so scaling by
        # the target length recovers the sequence-level score.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)

def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : Any = set()
# Replace all the whitespace in our sentence
UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 2_6
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : str = [False] * 2_6
for char in input_str:
if char.islower():
UpperCamelCase : List[Any] = True
elif char.isupper():
UpperCamelCase : List[Any] = True
return all(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
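

# A small cross-check (a sketch, not part of the original module): all three
# variants should agree on any input string.
def _demo_agreement() -> None:
    for sentence in ("The quick brown fox jumps over the lazy dog", "Hello world"):
        results = {is_pangram(sentence), is_pangram_faster(sentence), is_pangram_fastest(sentence)}
        assert len(results) == 1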


def benchmark() -> None:
    """Benchmark the three implementations."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()

import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
__UpperCAmelCase : Dict = parse_args()
main(args)
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__SCREAMING_SNAKE_CASE )
| 315
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
'''simple docstring'''
    order = 1
@register_to_config
    def __init__( self , num_train_timesteps: int = 2_000 , snr: float = 0.15 , sigma_min: float = 0.01 , sigma_max: float = 1_348.0 , sampling_eps: float = 1e-5 , correct_steps: int = 1 , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = sigma_max
# setable values
UpperCamelCase : Union[str, Any] = None
self.set_sigmas(_lowercase , _lowercase , _lowercase , _lowercase )
    def scale_model_input( self , sample , timestep=None ):
"""simple docstring"""
return sample
    def set_timesteps( self , num_inference_steps , sampling_eps=None , device=None ):
"""simple docstring"""
UpperCamelCase : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCamelCase : Tuple = torch.linspace(1 , _lowercase , _lowercase , device=_lowercase )
    def set_sigmas( self , num_inference_steps , sigma_min=None , sigma_max=None , sampling_eps=None ):
"""simple docstring"""
UpperCamelCase : Any = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCamelCase : Union[str, Any] = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCamelCase : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_lowercase , _lowercase )
UpperCamelCase : Union[str, Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCamelCase : Union[str, Any] = torch.exp(torch.linspace(math.log(_lowercase ) , math.log(_lowercase ) , _lowercase ) )
UpperCamelCase : List[str] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def get_adjacent_sigma( self , timesteps , t ):
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def step_pred( self , model_output , timestep , sample , generator=None , return_dict=True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
UpperCamelCase : int = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCamelCase : Any = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
UpperCamelCase : List[str] = timesteps.to(self.discrete_sigmas.device )
UpperCamelCase : List[str] = self.discrete_sigmas[timesteps].to(sample.device )
UpperCamelCase : int = self.get_adjacent_sigma(_lowercase , _lowercase ).to(sample.device )
UpperCamelCase : Any = torch.zeros_like(_lowercase )
UpperCamelCase : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCamelCase : Optional[Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
UpperCamelCase : Dict = diffusion.unsqueeze(-1 )
UpperCamelCase : Union[str, Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
UpperCamelCase : Dict = randn_tensor(
sample.shape , layout=sample.layout , generator=_lowercase , device=sample.device , dtype=sample.dtype )
UpperCamelCase : Any = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCamelCase : str = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_lowercase , prev_sample_mean=_lowercase )
    def step_correct( self , model_output , sample , generator=None , return_dict=True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
UpperCamelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=_lowercase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
UpperCamelCase : Optional[int] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
UpperCamelCase : Dict = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
UpperCamelCase : List[str] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
UpperCamelCase : str = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
UpperCamelCase : Optional[Any] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
UpperCamelCase : Tuple = step_size.unsqueeze(-1 )
UpperCamelCase : Tuple = sample + step_size * model_output
UpperCamelCase : Union[str, Any] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowercase )
    def add_noise( self , original_samples , noise , timesteps , ):
"""simple docstring"""
UpperCamelCase : List[Any] = timesteps.to(original_samples.device )
UpperCamelCase : Dict = self.discrete_sigmas.to(original_samples.device )[timesteps]
UpperCamelCase : Any = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_lowercase ) * sigmas[:, None, None, None]
)
UpperCamelCase : List[str] = noise + original_samples
return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
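# A minimal sketch (added for illustration, not part of the original file) of the
# predictor-corrector sampling loop the methods above are designed for.
# `dummy_model` stands in for a trained score network; every name below is an
# assumption for demonstration, not a pipeline API.
def _demo_predictor_corrector_loop(scheduler, shape=(1, 3, 8, 8), num_inference_steps=5):
    def dummy_model(sample, t):
        return torch.randn_like(sample)  # placeholder for a score/noise prediction

    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)
    sample = torch.randn(shape) * scheduler.config.sigma_max
    for t in scheduler.timesteps:
        # corrector: a Langevin-style step at the current noise level
        sample = scheduler.step_correct(dummy_model(sample, t), sample).prev_sample
        # predictor: one reverse-SDE step toward the next (lower) noise level
        sample = scheduler.step_pred(dummy_model(sample, t), t, sample).prev_sample
    return sample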
| 366
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : List[Any] = "ibert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = vocab_size
UpperCamelCase : Optional[int] = hidden_size
UpperCamelCase : Tuple = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : Dict = hidden_act
UpperCamelCase : Union[str, Any] = intermediate_size
UpperCamelCase : str = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Dict = max_position_embeddings
UpperCamelCase : Union[str, Any] = type_vocab_size
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : Union[str, Any] = layer_norm_eps
UpperCamelCase : Dict = position_embedding_type
UpperCamelCase : int = quant_mode
UpperCamelCase : Any = force_dequant
class IBertOnnxConfig(OnnxConfig):
'''simple docstring'''
@property
    def inputs( self ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
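# Illustrative usage (added; assumes this module mirrors transformers' I-BERT):
#   config = IBertConfig(quant_mode=True)   # enables integer-only arithmetic
# The ONNX config above then exposes "batch" and "sequence" (plus "choice" for
# multiple-choice tasks) as dynamic axes when exporting.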
| 315
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : List[str] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : List[Any] = """gpt_neox_japanese"""
    def __init__( self , vocab_size=32_000 , hidden_size=2_560 , num_hidden_layers=32 , num_attention_heads=32 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=10_000 , max_position_embeddings=2_048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=31_996 , eos_token_id=31_999 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ):
"""simple docstring"""
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase : str = vocab_size
UpperCamelCase : Any = max_position_embeddings
UpperCamelCase : List[Any] = hidden_size
UpperCamelCase : List[str] = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : int = intermediate_multiple_size
UpperCamelCase : Union[str, Any] = hidden_act
UpperCamelCase : str = rotary_pct
UpperCamelCase : Dict = rotary_emb_base
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : int = use_cache
UpperCamelCase : int = attention_dropout
UpperCamelCase : Optional[int] = hidden_dropout
| 367
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__UpperCAmelCase : int = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
'''simple docstring'''
def __init__( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**__SCREAMING_SNAKE_CASE )
    def xpath_soup( self , element ):
"""simple docstring"""
UpperCamelCase : List[Any] = []
UpperCamelCase : int = []
UpperCamelCase : List[Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCamelCase : Tuple = parent.find_all(child.name , recursive=__SCREAMING_SNAKE_CASE )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(__SCREAMING_SNAKE_CASE , 1 ) if s is child ) )
UpperCamelCase : Optional[Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
    def get_three_from_single( self , html_string ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' )
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : List[str] = []
UpperCamelCase : str = []
for element in html_code.descendants:
if type(__SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCamelCase : Any = html.unescape(__SCREAMING_SNAKE_CASE ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : int = self.xpath_soup(__SCREAMING_SNAKE_CASE )
stringaxtag_seq.append(__SCREAMING_SNAKE_CASE )
stringaxsubs_seq.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def construct_xpath( self , xpath_tags , xpath_subs ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ''''''
for tagname, subs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
    def __call__( self , html_strings ):
"""simple docstring"""
UpperCamelCase : int = False
# Check that strings has a valid type
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = True
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
if len(__SCREAMING_SNAKE_CASE ) == 0 or isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = True
if not valid_strings:
raise ValueError(
'''HTML strings must of type `str`, `List[str]` (batch of examples), '''
f"""but is of type {type(__SCREAMING_SNAKE_CASE )}.""" )
UpperCamelCase : int = bool(isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE )) )
if not is_batched:
UpperCamelCase : Union[str, Any] = [html_strings]
# Get nodes + xpaths
UpperCamelCase : str = []
UpperCamelCase : int = []
for html_string in html_strings:
UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = self.get_three_from_single(__SCREAMING_SNAKE_CASE )
nodes.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = []
for node, tag_list, sub_list in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : str = self.construct_xpath(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
xpath_strings.append(__SCREAMING_SNAKE_CASE )
xpaths.append(__SCREAMING_SNAKE_CASE )
# return as Dict
UpperCamelCase : List[str] = {'''nodes''': nodes, '''xpaths''': xpaths}
UpperCamelCase : List[Any] = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
return encoded_inputs
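# A self-contained sketch (added for illustration, not part of the original
# file) of the node/xpath pairs the extractor above produces, reimplementing
# the xpath_soup/construct_xpath logic on a tiny document.
def _demo_xpath_extraction():
    from bs4 import BeautifulSoup  # local import so the sketch stands alone

    soup = BeautifulSoup("<html><body><p>hello</p><p>world</p></body></html>", "html.parser")
    for text_node in soup.find_all(string=True):
        if not text_node.strip():
            continue
        tags, subs = [], []
        child = text_node.parent
        for parent in child.parents:
            # subscript is 0 when the tag is an only child, else its 1-based index
            siblings = parent.find_all(child.name, recursive=False)
            tags.append(child.name)
            subs.append(0 if len(siblings) == 1 else siblings.index(child) + 1)
            child = parent
        xpath = "".join(f"/{tag}" + (f"[{sub}]" if sub else "") for tag, sub in zip(reversed(tags), reversed(subs)))
        print(text_node.strip(), "->", xpath)  # e.g. hello -> /html/body/p[1]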
| 315
| 0
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCamelCase : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained(
SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = ["""key_proj""", """value_proj""", """query_proj"""]
UpperCamelCase : Dict = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
UpperCamelCase : int = key.split('''.''' )
if attributes[0] == "lm_head":
UpperCamelCase : Union[str, Any] = prophet
UpperCamelCase : List[str] = prophet_old
else:
UpperCamelCase : List[str] = prophet.prophetnet
UpperCamelCase : Tuple = prophet_old.model
UpperCamelCase : Dict = False
for attribute in attributes:
if attribute in mapping:
UpperCamelCase : str = mapping[attribute]
if not hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase : Optional[int] = attribute
elif hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCamelCase : Any = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
UpperCamelCase : int = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCamelCase : Dict = old_model.bias
logger.info(F"""{attribute} is initialized""" )
UpperCamelCase : Optional[int] = True
break
elif attribute in special_keys and hasattr(SCREAMING_SNAKE_CASE_ , '''in_proj_weight''' ):
UpperCamelCase : Any = old_model.in_proj_weight.shape[0] // 3
UpperCamelCase : Any = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCamelCase : Optional[int] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCamelCase : Tuple = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCamelCase : Tuple = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCamelCase : str = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCamelCase : Optional[Any] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCamelCase : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCamelCase : Union[str, Any] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
UpperCamelCase : Dict = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
UpperCamelCase : List[Any] = True
break
if attribute.isdigit():
UpperCamelCase : Dict = model[int(SCREAMING_SNAKE_CASE_ )]
UpperCamelCase : Any = old_model[int(SCREAMING_SNAKE_CASE_ )]
else:
UpperCamelCase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if old_attribute == "":
UpperCamelCase : int = old_model
else:
if not hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
UpperCamelCase : List[str] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(SCREAMING_SNAKE_CASE_ )
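# Illustrative sketch (added; not part of the conversion logic): how a fused
# `in_proj_weight` of shape (3 * embed_dim, embed_dim) is sliced into separate
# query/key/value projections, matching the slicing used above.
def _demo_split_fused_in_proj(embed_dim: int = 4):
    import torch  # local import; the script itself only needs torch.nn

    in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
    query_w = in_proj_weight[:embed_dim, :]
    key_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
    value_w = in_proj_weight[2 * embed_dim :, :]
    assert query_w.shape == key_w.shape == value_w.shape == (embed_dim, embed_dim)
    return query_w, key_w, value_w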
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCAmelCase : List[Any] = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 368
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
__UpperCAmelCase : List[str] = getLogger(__name__)
__UpperCAmelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations( examples: List[str] , out_file: str , model_name: str , batch_size: int = 8 , device: str = DEFAULT_DEVICE , fpaa=False , task="summarization" , prefix=None , **generate_kwargs , ):
"""simple docstring"""
UpperCamelCase : Dict = Path(SCREAMING_SNAKE_CASE_ ).open('''w''' , encoding='''utf-8''' )
UpperCamelCase : List[Any] = str(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
if fpaa:
UpperCamelCase : List[Any] = model.half()
UpperCamelCase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
UpperCamelCase : int = time.time()
# update config with task specific params
use_task_specific_params(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if prefix is None:
UpperCamelCase : Union[str, Any] = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ):
UpperCamelCase : Optional[int] = [prefix + text for text in examples_chunk]
UpperCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , truncation=SCREAMING_SNAKE_CASE_ , padding='''longest''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
UpperCamelCase : str = int(time.time() - start_time ) # seconds
UpperCamelCase : Any = len(SCREAMING_SNAKE_CASE_ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now():
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate( verbose=True ):
"""simple docstring"""
UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=SCREAMING_SNAKE_CASE_ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=SCREAMING_SNAKE_CASE_ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=SCREAMING_SNAKE_CASE_ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=SCREAMING_SNAKE_CASE_ , default=8 , required=SCREAMING_SNAKE_CASE_ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=SCREAMING_SNAKE_CASE_ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
UpperCamelCase , UpperCamelCase : int = parser.parse_known_args()
UpperCamelCase : str = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE_ )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
UpperCamelCase : str = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
UpperCamelCase : Tuple = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
UpperCamelCase : str = generate_summaries_or_translations(
SCREAMING_SNAKE_CASE_ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **SCREAMING_SNAKE_CASE_ , )
if args.reference_path is None:
return {}
# Compute scores
UpperCamelCase : Tuple = calculate_bleu if '''translation''' in args.task else calculate_rouge
UpperCamelCase : Dict = [x.rstrip() for x in open(args.save_path ).readlines()]
UpperCamelCase : Optional[int] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(SCREAMING_SNAKE_CASE_ )]
UpperCamelCase : dict = score_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
scores.update(SCREAMING_SNAKE_CASE_ )
if args.dump_args:
scores.update(SCREAMING_SNAKE_CASE_ )
if args.info:
UpperCamelCase : Optional[Any] = args.info
if verbose:
print(SCREAMING_SNAKE_CASE_ )
if args.score_path is not None:
json.dump(SCREAMING_SNAKE_CASE_ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
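    # Usage for summarization (illustrative, using only flags defined above):
    # python run_eval.py t5-base $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/rouge.json --task summarization --bs 32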
run_generate(verbose=True)
| 315
| 0
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ):
"""simple docstring"""
UpperCamelCase : Dict = parent
UpperCamelCase : Optional[Any] = batch_size
UpperCamelCase : Dict = seq_length
UpperCamelCase : str = act_dim
UpperCamelCase : Optional[int] = state_dim
UpperCamelCase : List[str] = hidden_size
UpperCamelCase : Dict = max_length
UpperCamelCase : Union[str, Any] = is_training
    def prepare_config_and_inputs( self ):
"""simple docstring"""
UpperCamelCase : List[str] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCamelCase : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCamelCase : Any = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCamelCase : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCamelCase : Tuple = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 )
UpperCamelCase : Union[str, Any] = random_attention_mask((self.batch_size, self.seq_length) )
UpperCamelCase : Optional[Any] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config( self ):
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model( self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
"""simple docstring"""
UpperCamelCase : Optional[int] = DecisionTransformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase : Any = model(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = config_and_inputs
UpperCamelCase : List[str] = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Any = (DecisionTransformerModel,) if is_torch_available() else ()
__UpperCamelCase : Dict = ()
__UpperCamelCase : List[str] = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__UpperCamelCase : Dict = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Tuple = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : str = False
__UpperCamelCase : int = False
__UpperCamelCase : List[str] = False
__UpperCamelCase : Union[str, Any] = False
    def setUp( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = DecisionTransformerModelTester(self )
UpperCamelCase : Dict = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model( self ):
"""simple docstring"""
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
@slow
    def test_model_from_pretrained( self ):
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[Any] = DecisionTransformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
    def test_forward_signature( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Any = model_class(lowercase_ )
UpperCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : int = [*signature.parameters.keys()]
UpperCamelCase : int = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(lowercase_ )] , lowercase_ )
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_autoregressive_prediction( self ):
"""simple docstring"""
UpperCamelCase : Any = 2 # number of steps of autoregressive prediction we will perform
UpperCamelCase : Any = 10 # defined by the RL environment, may be normalized
UpperCamelCase : Optional[int] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
UpperCamelCase : Union[str, Any] = model.to(lowercase_ )
UpperCamelCase : Tuple = model.config
torch.manual_seed(0 )
UpperCamelCase : List[Any] = torch.randn(1 , 1 , config.state_dim ).to(device=lowercase_ , dtype=torch.floataa ) # env.reset()
UpperCamelCase : List[str] = torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=lowercase_ )
UpperCamelCase : List[Any] = torch.tensor(lowercase_ , device=lowercase_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCamelCase : Optional[int] = state
UpperCamelCase : Optional[int] = torch.zeros(1 , 0 , config.act_dim , device=lowercase_ , dtype=torch.floataa )
UpperCamelCase : int = torch.zeros(1 , 0 , device=lowercase_ , dtype=torch.floataa )
UpperCamelCase : Optional[Any] = torch.tensor(0 , device=lowercase_ , dtype=torch.long ).reshape(1 , 1 )
for step in range(lowercase_ ):
UpperCamelCase : Optional[int] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowercase_ )] , dim=1 )
UpperCamelCase : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=lowercase_ )] , dim=1 )
UpperCamelCase : int = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = model(
states=lowercase_ , actions=lowercase_ , rewards=lowercase_ , returns_to_go=lowercase_ , timesteps=lowercase_ , attention_mask=lowercase_ , return_dict=lowercase_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=lowercase_ , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCamelCase : List[str] = action_pred[0, -1]
UpperCamelCase : Dict = torch.cat([states, state] , dim=1 )
UpperCamelCase : List[str] = returns_to_go[0, -1] - reward
UpperCamelCase : int = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCamelCase : Optional[int] = torch.cat(
[timesteps, torch.ones((1, 1) , device=lowercase_ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 369
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
'''simple docstring'''
__UpperCamelCase : int = ["image_processor", "tokenizer"]
__UpperCamelCase : List[str] = "AutoImageProcessor"
__UpperCamelCase : Optional[Any] = "AutoTokenizer"
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
"""simple docstring"""
UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
UpperCamelCase : Any = kwargs.pop('''feature_extractor''' )
UpperCamelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = self.image_processor
UpperCamelCase : int = False
    def __call__( self , *args , **kwargs ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = kwargs.pop('''images''' , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = kwargs.pop('''text''' , __SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase : Union[str, Any] = args[0]
UpperCamelCase : str = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
UpperCamelCase : List[str] = self.image_processor(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is not None:
UpperCamelCase : Optional[Any] = self.tokenizer(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCamelCase : List[str] = encodings['''input_ids''']
return inputs
    def batch_decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@contextmanager
    def as_target_processor( self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
UpperCamelCase : Any = True
UpperCamelCase : int = self.tokenizer
yield
UpperCamelCase : List[Any] = self.image_processor
UpperCamelCase : Tuple = False
    def tokenajson( self , tokens , is_inner_value=False , added_vocab=None ):
"""simple docstring"""
if added_vocab is None:
UpperCamelCase : str = self.tokenizer.get_added_vocab()
UpperCamelCase : int = {}
while tokens:
UpperCamelCase : Dict = re.search(R'''<s_(.*?)>''' , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
UpperCamelCase : List[str] = start_token.group(1 )
UpperCamelCase : Dict = re.search(Rf"""</s_{key}>""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
UpperCamelCase : Any = start_token.group()
if end_token is None:
UpperCamelCase : Optional[int] = tokens.replace(__SCREAMING_SNAKE_CASE , '''''' )
else:
UpperCamelCase : Dict = end_token.group()
UpperCamelCase : int = re.escape(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = re.escape(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
UpperCamelCase : Dict = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
UpperCamelCase : Tuple = self.tokenajson(__SCREAMING_SNAKE_CASE , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE )
if value:
if len(__SCREAMING_SNAKE_CASE ) == 1:
UpperCamelCase : str = value[0]
UpperCamelCase : str = value
else: # leaf nodes
UpperCamelCase : Optional[int] = []
for leaf in content.split(R'''<sep/>''' ):
UpperCamelCase : Optional[int] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
UpperCamelCase : int = leaf[1:-2] # for categorical special tokens
output[key].append(__SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
UpperCamelCase : Tuple = output[key][0]
UpperCamelCase : List[Any] = tokens[tokens.find(__SCREAMING_SNAKE_CASE ) + len(__SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
    def feature_extractor_class( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
    def feature_extractor( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
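# Illustrative example (added, not part of the original file) of the format
# `tokenajson` above parses: a token sequence such as
#   "<s_menu><s_name>Pizza</s_name></s_menu>"
# becomes nested dictionaries, roughly {"menu": {"name": "Pizza"}}, and
# "<sep/>" separates sibling leaf values, which are collected into a list.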
| 315
| 0
|
"""simple docstring"""
from __future__ import annotations
__UpperCAmelCase : Any = "Muhammad Umer Farooq"
__UpperCAmelCase : List[str] = "MIT"
__UpperCAmelCase : Any = "1.0.0"
__UpperCAmelCase : Optional[int] = "Muhammad Umer Farooq"
__UpperCAmelCase : Optional[Any] = "contact@muhammadumerfarooq.me"
__UpperCAmelCase : str = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
'''simple docstring'''
    def __init__( self , domain: str ):
"""simple docstring"""
super().__init__()
UpperCamelCase : list[str] = []
UpperCamelCase : str = domain
    def handle_starttag( self , tag , attrs ):
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
UpperCamelCase : List[str] = parse.urljoin(self.domain , __SCREAMING_SNAKE_CASE )
self.urls.append(__SCREAMING_SNAKE_CASE )
def get_domain_name( url: str ) -> str:
"""simple docstring"""
return ".".join(get_sub_domain_name(_lowerCamelCase ).split('''.''' )[-2:] )
def get_sub_domain_name( url: str ) -> str:
"""simple docstring"""
return parse.urlparse(_lowerCamelCase ).netloc
def a ( SCREAMING_SNAKE_CASE_ : str = "https://github.com" ):
"""simple docstring"""
UpperCamelCase : Optional[int] = get_domain_name(_lowerCamelCase )
# Initialize the parser
UpperCamelCase : List[str] = Parser(_lowerCamelCase )
try:
# Open URL
UpperCamelCase : Optional[Any] = requests.get(_lowerCamelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
UpperCamelCase : str = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
UpperCamelCase : Optional[Any] = requests.get(_lowerCamelCase )
# Get the valid email.
UpperCamelCase : Any = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_lowerCamelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_lowerCamelCase )
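# A small self-contained sketch (added for illustration) of the pattern used
# above: "[a-zA-Z0-9]+@" + domain matches simple local-part@domain strings.
def _demo_email_pattern(domain: str = "github.com") -> list[str]:
    sample_text = "contact: support@github.com, noreply@github.com, other@example.org"
    return re.findall("[a-zA-Z0-9]+@" + domain, sample_text)
    # -> ['support@github.com', 'noreply@github.com'] for the default domain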
if __name__ == "__main__":
__UpperCAmelCase : Dict = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print("\n".join(sorted(emails)))
| 370
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase : Union[str, Any] = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Union[str, Any] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
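# The branches below follow the standard lazy-import pattern: under
# TYPE_CHECKING, real imports are visible to static type checkers, while at
# runtime `_LazyModule` defers importing the heavy submodules until an
# attribute of this package is first accessed.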
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315
| 0
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ : List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ : Dict ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCamelCase : Optional[Any] = []
for i in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = i / num_diffusion_timesteps
UpperCamelCase : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
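# A self-contained sketch (added for illustration) of what the cosine variant
# above computes: beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta).
def _demo_cosine_betas(num_steps: int = 10, max_beta: float = 0.999) -> list:
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]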
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
'''simple docstring'''
@register_to_config
    def __init__( self , num_train_timesteps: int = 1_000 , variance_type: str = "fixed_small_log" , clip_sample: bool = True , clip_sample_range: Optional[float] = 1.0 , prediction_type: str = "epsilon" , beta_schedule: str = "squaredcos_cap_v2" , ):
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
UpperCamelCase : List[Any] = betas_for_alpha_bar(_SCREAMING_SNAKE_CASE )
UpperCamelCase : int = 1.0 - self.betas
UpperCamelCase : Dict = torch.cumprod(self.alphas , dim=0 )
UpperCamelCase : int = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
UpperCamelCase : Union[str, Any] = 1.0
# setable values
UpperCamelCase : List[str] = None
UpperCamelCase : Tuple = torch.from_numpy(np.arange(0 , _SCREAMING_SNAKE_CASE )[::-1].copy() )
UpperCamelCase : Optional[int] = variance_type
    def scale_model_input( self , sample , timestep=None ):
"""simple docstring"""
return sample
    def set_timesteps( self , num_inference_steps , device=None ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = num_inference_steps
UpperCamelCase : Optional[Any] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCamelCase : Tuple = (np.arange(0 , _SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCamelCase : Dict = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
    def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
"""simple docstring"""
if prev_timestep is None:
UpperCamelCase : int = t - 1
UpperCamelCase : str = self.alphas_cumprod[t]
UpperCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase : Dict = 1 - alpha_prod_t
UpperCamelCase : Dict = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase : Optional[Any] = self.betas[t]
else:
UpperCamelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCamelCase : List[Any] = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCamelCase : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCamelCase : Optional[int] = torch.log(torch.clamp(_SCREAMING_SNAKE_CASE , min=1e-20 ) )
UpperCamelCase : Optional[int] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCamelCase : Union[str, Any] = variance.log()
UpperCamelCase : Tuple = beta.log()
UpperCamelCase : List[str] = (predicted_variance + 1) / 2
UpperCamelCase : Dict = frac * max_log + (1 - frac) * min_log
return variance
    def step( self , model_output , timestep , sample , prev_timestep=None , generator=None , return_dict=True , ):
"""simple docstring"""
UpperCamelCase : int = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCamelCase : List[str] = torch.split(_SCREAMING_SNAKE_CASE , sample.shape[1] , dim=1 )
else:
UpperCamelCase : Tuple = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCamelCase : Tuple = t - 1
UpperCamelCase : List[str] = self.alphas_cumprod[t]
UpperCamelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase : Dict = 1 - alpha_prod_t
UpperCamelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase : Dict = self.betas[t]
UpperCamelCase : Union[str, Any] = self.alphas[t]
else:
UpperCamelCase : List[str] = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCamelCase : Union[str, Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCamelCase : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCamelCase : Any = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCamelCase : Dict = torch.clamp(
_SCREAMING_SNAKE_CASE , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase : str = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCamelCase : Optional[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCamelCase : int = 0
if t > 0:
UpperCamelCase : List[str] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=_SCREAMING_SNAKE_CASE , device=model_output.device )
UpperCamelCase : List[Any] = self._get_variance(
_SCREAMING_SNAKE_CASE , predicted_variance=_SCREAMING_SNAKE_CASE , prev_timestep=_SCREAMING_SNAKE_CASE , )
if self.variance_type == "fixed_small_log":
UpperCamelCase : Any = variance
elif self.variance_type == "learned_range":
UpperCamelCase : Any = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
''' for the UnCLIPScheduler.''' )
UpperCamelCase : Dict = variance * variance_noise
UpperCamelCase : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE )
    def add_noise( self , original_samples , noise , timesteps , ):
"""simple docstring"""
UpperCamelCase : int = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCamelCase : Optional[Any] = timesteps.to(original_samples.device )
UpperCamelCase : Optional[int] = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase : Any = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase : Any = sqrt_alpha_prod.unsqueeze(-1 )
UpperCamelCase : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase : List[str] = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase : Any = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCamelCase : List[str] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
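# Note (added for exposition): `add_noise` above is the closed-form forward
# process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise, with the
# per-timestep scalars flattened and unsqueezed so they broadcast over x_0.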
| 371
|
def solution( length: int = 50 ) -> int:
"""simple docstring"""
UpperCamelCase : List[str] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 315
| 0
|
def solution( n: int = 100 ) -> int:
"""simple docstring"""
UpperCamelCase : str = (n * (n + 1) // 2) ** 2
UpperCamelCase : Dict = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
'''simple docstring'''
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = eval_examples
UpperCamelCase : Optional[Any] = post_process_function
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix: str = "eval" ):
"""simple docstring"""
UpperCamelCase : int = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCamelCase : int = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCamelCase : Any = self.compute_metrics
UpperCamelCase : List[Any] = None
UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCamelCase : Dict = time.time()
try:
UpperCamelCase : str = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
UpperCamelCase : Union[str, Any] = compute_metrics
UpperCamelCase : Any = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions )
UpperCamelCase : Optional[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
UpperCamelCase : Dict = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
else:
UpperCamelCase : List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCamelCase : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE )
return metrics
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ):
"""simple docstring"""
UpperCamelCase : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCamelCase : Union[str, Any] = self.compute_metrics
UpperCamelCase : Tuple = None
UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCamelCase : Optional[int] = time.time()
try:
UpperCamelCase : int = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
UpperCamelCase : int = compute_metrics
UpperCamelCase : Dict = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , '''predict''' )
UpperCamelCase : Union[str, Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
UpperCamelCase : Any = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
| 315
| 0
|
import numpy as np
import datasets
__UpperCAmelCase : Optional[int] = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
__UpperCAmelCase : List[Any] = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
__UpperCAmelCase : Optional[Any] = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = np.array(_snake_case )
UpperCamelCase : Tuple = np.array(_snake_case )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
UpperCamelCase : Optional[Any] = X - np.mean(_snake_case )
UpperCamelCase : Union[str, Any] = np.cov(reference_distribution.T )
try:
UpperCamelCase : Optional[Any] = np.linalg.inv(_snake_case )
except np.linalg.LinAlgError:
UpperCamelCase : str = np.linalg.pinv(_snake_case )
UpperCamelCase : Any = np.dot(_snake_case , _snake_case )
UpperCamelCase : Tuple = np.dot(_snake_case , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 351
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
return "".join(sorted(SCREAMING_SNAKE_CASE_ ) )
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
return word_by_signature[signature(SCREAMING_SNAKE_CASE_ )]
__UpperCAmelCase : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
__UpperCAmelCase : Tuple = sorted({word.strip().lower() for word in data.splitlines()})
__UpperCAmelCase : Union[str, Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__UpperCAmelCase : int = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 315
| 0
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __get__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCamelCase : List[Any] = '''__cached_''' + self.fget.__name__
UpperCamelCase : Tuple = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if cached is None:
UpperCamelCase : Optional[Any] = self.fget(__SCREAMING_SNAKE_CASE )
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return cached
def a ( SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
UpperCamelCase : Any = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""" )
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
if is_torch_fx_proxy(UpperCamelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(UpperCamelCase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCamelCase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCamelCase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCamelCase__ , np.ndarray )
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
return isinstance(UpperCamelCase__ , np.ndarray )
def a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
return _is_numpy(UpperCamelCase__ )
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
import torch
return isinstance(UpperCamelCase__ , torch.Tensor )
def a ( SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch(UpperCamelCase__ )
def a ( SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
import torch
return isinstance(UpperCamelCase__ , torch.device )
def a ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(UpperCamelCase__ )
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
import torch
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if hasattr(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase : Optional[Any] = getattr(UpperCamelCase__ , UpperCamelCase__ )
else:
return False
return isinstance(UpperCamelCase__ , torch.dtype )
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(UpperCamelCase__ )
def a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
import tensorflow as tf
return isinstance(UpperCamelCase__ , tf.Tensor )
def a ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(UpperCamelCase__ )
def a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCamelCase__ , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCamelCase__ )
return type(UpperCamelCase__ ) == tf.Tensor
def a ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCamelCase__ )
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCamelCase__ , jnp.ndarray )
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
return False if not is_flax_available() else _is_jax(UpperCamelCase__ )
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , (dict, UserDict) ):
return {k: to_py_obj(UpperCamelCase__ ) for k, v in obj.items()}
elif isinstance(UpperCamelCase__ , (list, tuple) ):
return [to_py_obj(UpperCamelCase__ ) for o in obj]
elif is_tf_tensor(UpperCamelCase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCamelCase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCamelCase__ ):
return np.asarray(UpperCamelCase__ ).tolist()
elif isinstance(UpperCamelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , (dict, UserDict) ):
return {k: to_numpy(UpperCamelCase__ ) for k, v in obj.items()}
elif isinstance(UpperCamelCase__ , (list, tuple) ):
return np.array(UpperCamelCase__ )
elif is_tf_tensor(UpperCamelCase__ ):
return obj.numpy()
elif is_torch_tensor(UpperCamelCase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCamelCase__ ):
return np.asarray(UpperCamelCase__ )
else:
return obj
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(__SCREAMING_SNAKE_CASE ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
UpperCamelCase : Union[str, Any] = getattr(self , class_fields[0].name )
UpperCamelCase : List[Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = first_field.items()
UpperCamelCase : Any = True
else:
try:
UpperCamelCase : int = iter(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = True
except TypeError:
UpperCamelCase : Optional[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__SCREAMING_SNAKE_CASE ):
if (
not isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) )
or not len(__SCREAMING_SNAKE_CASE ) == 2
or not isinstance(element[0] , __SCREAMING_SNAKE_CASE )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCamelCase : str = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
UpperCamelCase : Any = element[1]
elif first_field is not None:
UpperCamelCase : Any = first_field
else:
for field in class_fields:
UpperCamelCase : Dict = getattr(self , field.name )
if v is not None:
UpperCamelCase : Optional[Any] = v
def __delitem__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : str = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __setitem__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
class UpperCAmelCase_ ( _a, _a):
'''simple docstring'''
@classmethod
def _lowercase ( cls , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = "longest"
__UpperCamelCase : List[Any] = "max_length"
__UpperCamelCase : List[str] = "do_not_pad"
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Any = "pt"
__UpperCamelCase : str = "tf"
__UpperCamelCase : int = "np"
__UpperCamelCase : Union[str, Any] = "jax"
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = context_managers
UpperCamelCase : Optional[int] = ExitStack()
def __enter__( self ):
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(__SCREAMING_SNAKE_CASE )
def __exit__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
self.stack.__exit__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase : List[str] = infer_framework(UpperCamelCase__ )
if framework == "tf":
UpperCamelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCamelCase : Any = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCamelCase : int = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = model_class.__name__
UpperCamelCase : Union[str, Any] = infer_framework(UpperCamelCase__ )
if framework == "tf":
UpperCamelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCamelCase : Optional[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCamelCase : List[str] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a ( SCREAMING_SNAKE_CASE_ : MutableMapping , SCREAMING_SNAKE_CASE_ : str = "" , SCREAMING_SNAKE_CASE_ : str = "." ):
"""simple docstring"""
def _flatten_dict(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int="" , SCREAMING_SNAKE_CASE_ : Optional[Any]="." ):
for k, v in d.items():
UpperCamelCase : Tuple = str(UpperCamelCase__ ) + delimiter + str(UpperCamelCase__ ) if parent_key else k
if v and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
yield from flatten_dict(UpperCamelCase__ , UpperCamelCase__ , delimiter=UpperCamelCase__ ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
@contextmanager
def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : bool = False ):
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str]=None ):
"""simple docstring"""
if is_numpy_array(UpperCamelCase__ ):
return np.transpose(UpperCamelCase__ , axes=UpperCamelCase__ )
elif is_torch_tensor(UpperCamelCase__ ):
return array.T if axes is None else array.permute(*UpperCamelCase__ )
elif is_tf_tensor(UpperCamelCase__ ):
import tensorflow as tf
return tf.transpose(UpperCamelCase__ , perm=UpperCamelCase__ )
elif is_jax_tensor(UpperCamelCase__ ):
return jnp.transpose(UpperCamelCase__ , axes=UpperCamelCase__ )
else:
raise ValueError(F"""Type not supported for transpose: {type(UpperCamelCase__ )}.""" )
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
if is_numpy_array(UpperCamelCase__ ):
return np.reshape(UpperCamelCase__ , UpperCamelCase__ )
elif is_torch_tensor(UpperCamelCase__ ):
return array.reshape(*UpperCamelCase__ )
elif is_tf_tensor(UpperCamelCase__ ):
import tensorflow as tf
return tf.reshape(UpperCamelCase__ , UpperCamelCase__ )
elif is_jax_tensor(UpperCamelCase__ ):
return jnp.reshape(UpperCamelCase__ , UpperCamelCase__ )
else:
raise ValueError(F"""Type not supported for reshape: {type(UpperCamelCase__ )}.""" )
def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=None ):
"""simple docstring"""
if is_numpy_array(UpperCamelCase__ ):
return np.squeeze(UpperCamelCase__ , axis=UpperCamelCase__ )
elif is_torch_tensor(UpperCamelCase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCamelCase__ )
elif is_tf_tensor(UpperCamelCase__ ):
import tensorflow as tf
return tf.squeeze(UpperCamelCase__ , axis=UpperCamelCase__ )
elif is_jax_tensor(UpperCamelCase__ ):
return jnp.squeeze(UpperCamelCase__ , axis=UpperCamelCase__ )
else:
raise ValueError(F"""Type not supported for squeeze: {type(UpperCamelCase__ )}.""" )
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
if is_numpy_array(UpperCamelCase__ ):
return np.expand_dims(UpperCamelCase__ , UpperCamelCase__ )
elif is_torch_tensor(UpperCamelCase__ ):
return array.unsqueeze(dim=UpperCamelCase__ )
elif is_tf_tensor(UpperCamelCase__ ):
import tensorflow as tf
return tf.expand_dims(UpperCamelCase__ , axis=UpperCamelCase__ )
elif is_jax_tensor(UpperCamelCase__ ):
return jnp.expand_dims(UpperCamelCase__ , axis=UpperCamelCase__ )
else:
raise ValueError(F"""Type not supported for expand_dims: {type(UpperCamelCase__ )}.""" )
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ):
"""simple docstring"""
if is_numpy_array(UpperCamelCase__ ):
return np.size(UpperCamelCase__ )
elif is_torch_tensor(UpperCamelCase__ ):
return array.numel()
elif is_tf_tensor(UpperCamelCase__ ):
import tensorflow as tf
return tf.size(UpperCamelCase__ )
elif is_jax_tensor(UpperCamelCase__ ):
return array.size
else:
raise ValueError(F"""Type not supported for expand_dims: {type(UpperCamelCase__ )}.""" )
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(UpperCamelCase__ , (tuple, list) ):
UpperCamelCase : List[Any] = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCamelCase : str = F"""{repo_id}--{value}"""
return auto_map
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
for base_class in inspect.getmro(UpperCamelCase__ ):
UpperCamelCase : List[Any] = base_class.__module__
UpperCamelCase : Union[str, Any] = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"""Could not infer framework from class {model_class}.""" )
| 352
|
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] ):
"""simple docstring"""
UpperCamelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(SCREAMING_SNAKE_CASE_ ):
if len(SCREAMING_SNAKE_CASE_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(SCREAMING_SNAKE_CASE_ ) )
return data_lists
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] , SCREAMING_SNAKE_CASE_ : list[int] ):
"""simple docstring"""
UpperCamelCase : list[list[float]] = []
for dlist, weight in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = max(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
UpperCamelCase : Dict = F"""Invalid weight of {weight:f} provided"""
raise ValueError(SCREAMING_SNAKE_CASE_ )
score_lists.append(SCREAMING_SNAKE_CASE_ )
return score_lists
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] ):
"""simple docstring"""
UpperCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = final_scores[j] + ele
return final_scores
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] , SCREAMING_SNAKE_CASE_ : list[int] ):
"""simple docstring"""
UpperCamelCase : str = get_data(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = calculate_each_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = generate_final_scores(SCREAMING_SNAKE_CASE_ )
# append scores to source data
for i, ele in enumerate(SCREAMING_SNAKE_CASE_ ):
source_data[i].append(SCREAMING_SNAKE_CASE_ )
return source_data
| 315
| 0
|
import sys
__UpperCAmelCase : Dict = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : List[Any] = 1
for digit in s:
product *= int(UpperCAmelCase_ )
return product
def a ( SCREAMING_SNAKE_CASE_ : str = N ):
"""simple docstring"""
UpperCamelCase : List[str] = -sys.maxsize - 1
UpperCamelCase : Optional[int] = n[:1_3]
UpperCamelCase : Optional[Any] = 1_3
while cur_index < len(UpperCAmelCase_ ) - 1_3:
if int(n[cur_index] ) >= int(substr[0] ):
UpperCamelCase : List[Any] = substr[1:] + n[cur_index]
cur_index += 1
else:
UpperCamelCase : Dict = max(UpperCAmelCase_ , str_eval(UpperCAmelCase_ ) )
UpperCamelCase : str = n[cur_index : cur_index + 1_3]
cur_index += 1_3
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 353
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Union[str, Any] = ""
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def a ( ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : List[Any] = get_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print('''Processing...''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = update_image_and_anno(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for index, image in enumerate(SCREAMING_SNAKE_CASE_ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCamelCase : Optional[int] = random_chars(3_2 )
UpperCamelCase : List[Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
UpperCamelCase : int = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , SCREAMING_SNAKE_CASE_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(F"""Success {index+1}/{len(SCREAMING_SNAKE_CASE_ )} with {file_name}""" )
UpperCamelCase : Any = []
for anno in new_annos[index]:
UpperCamelCase : Tuple = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(SCREAMING_SNAKE_CASE_ )
with open(F"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Any = []
UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , '''*.txt''' ) ):
UpperCamelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(SCREAMING_SNAKE_CASE_ ) as in_file:
UpperCamelCase : List[str] = in_file.readlines()
UpperCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{label_name}.jpg""" )
UpperCamelCase : Union[str, Any] = []
for obj_list in obj_lists:
UpperCamelCase : str = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE_ )
labels.append(SCREAMING_SNAKE_CASE_ )
return img_paths, labels
def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int = 1 ):
"""simple docstring"""
UpperCamelCase : List[Any] = []
UpperCamelCase : str = []
UpperCamelCase : int = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase : Tuple = []
UpperCamelCase : Optional[int] = img_list[idx]
path_list.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = anno_list[idx]
UpperCamelCase : Optional[Any] = cva.imread(SCREAMING_SNAKE_CASE_ )
if flip_type == 1:
UpperCamelCase : Optional[Any] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for bbox in img_annos:
UpperCamelCase : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCamelCase : List[str] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for bbox in img_annos:
UpperCamelCase : Union[str, Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(SCREAMING_SNAKE_CASE_ )
new_imgs_list.append(SCREAMING_SNAKE_CASE_ )
return new_imgs_list, new_annos_lists, path_list
def a ( SCREAMING_SNAKE_CASE_ : int = 3_2 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
UpperCamelCase : Any = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 315
| 0
|
def a ( SCREAMING_SNAKE_CASE_ : int = 1_0 , SCREAMING_SNAKE_CASE_ : int = 2_2 ):
"""simple docstring"""
UpperCamelCase = range(1 , _lowerCAmelCase )
UpperCamelCase = range(1 , _lowerCAmelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 354
|
import qiskit
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : List[str] = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCamelCase : Any = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
UpperCamelCase : Any = qiskit.execute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__UpperCAmelCase : int = half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
| 315
| 0
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
UpperCamelCase : Any = filter(lambda SCREAMING_SNAKE_CASE_ : p.requires_grad , model.parameters() )
UpperCamelCase : Tuple = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__UpperCAmelCase : List[str] = logging.getLogger(__name__)
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ):
if metric == "rouge2":
UpperCamelCase : List[str] = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
UpperCamelCase : Dict = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
UpperCamelCase : Any = '''{val_avg_em:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
UpperCamelCase : List[Any] = ModelCheckpoint(
dirpath=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , )
class UpperCAmelCase_ ( pl.Callback):
'''simple docstring'''
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = {f"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCamelCase_ )
@rank_zero_only
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
UpperCamelCase : Tuple = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
UpperCamelCase : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCamelCase : Any = od / '''test_results.txt'''
UpperCamelCase : Any = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCamelCase : Optional[Any] = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
UpperCamelCase : Tuple = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCamelCase_ )
generations_file.parent.mkdir(exist_ok=lowerCamelCase_ )
with open(lowerCamelCase_ , '''a+''' ) as writer:
for key in sorted(lowerCamelCase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCamelCase : Optional[int] = metrics[key]
if isinstance(lowerCamelCase_ , torch.Tensor ):
UpperCamelCase : int = val.item()
UpperCamelCase : List[str] = f"""{key}: {val:.6f}\n"""
writer.write(lowerCamelCase_ )
if not save_generations:
return
if "preds" in metrics:
UpperCamelCase : Dict = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(lowerCamelCase_ )
@rank_zero_only
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
try:
UpperCamelCase : Dict = pl_module.model.model.num_parameters()
except AttributeError:
UpperCamelCase : Optional[Any] = pl_module.model.num_parameters()
UpperCamelCase : Optional[Any] = count_trainable_parameters(lowerCamelCase_ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCamelCase_ , lowerCamelCase_ , '''test''' )
@rank_zero_only
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 355
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = CLIPConfig
__UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"]
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = CLIPVisionModel(config.vision_config )
UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy()
UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy()
UpperCamelCase : Dict = []
UpperCamelCase : List[str] = image_embeds.shape[0]
for i in range(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase : Optional[int] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
UpperCamelCase : List[str] = special_cos_dist[i][concept_idx]
UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item()
UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
UpperCamelCase : Optional[int] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
UpperCamelCase : Optional[int] = cos_dist[i][concept_idx]
UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item()
UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds )
UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase : Union[str, Any] = 0.0
UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
UpperCamelCase : int = special_care * 0.01
UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 315
| 0
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def a ( *SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : str=2 ):
"""simple docstring"""
from .. import __version__
UpperCamelCase : List[Any] = take_from
UpperCamelCase : List[str] = ()
if not isinstance(args[0] , __UpperCAmelCase ):
UpperCamelCase : List[Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__UpperCAmelCase ).base_version ) >= version.parse(__UpperCAmelCase ):
raise ValueError(
F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
F""" version {__version__} is >= {version_name}""" )
UpperCamelCase : Union[str, Any] = None
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__UpperCAmelCase ),)
UpperCamelCase : Tuple = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__UpperCAmelCase , __UpperCAmelCase ):
values += (getattr(__UpperCAmelCase , __UpperCAmelCase ),)
UpperCamelCase : List[Any] = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
UpperCamelCase : int = F"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
UpperCamelCase : str = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __UpperCAmelCase , stacklevel=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) > 0:
UpperCamelCase : str = inspect.getouterframes(inspect.currentframe() )[1]
UpperCamelCase : Union[str, Any] = call_frame.filename
UpperCamelCase : Any = call_frame.lineno
UpperCamelCase : List[Any] = call_frame.function
UpperCamelCase , UpperCamelCase : Optional[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__UpperCAmelCase ) == 0:
return
elif len(__UpperCAmelCase ) == 1:
return values[0]
return values
| 356
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def a ( ):
"""simple docstring"""
UpperCamelCase : Any = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
UpperCamelCase : Tuple = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
# Let's go
UpperCamelCase : List[Any] = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
UpperCamelCase : str = args.func(SCREAMING_SNAKE_CASE_ )
service.run()
if __name__ == "__main__":
main()
| 315
| 0
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
__UpperCAmelCase : Tuple = logging.getLogger(__name__)
__UpperCAmelCase : Dict = {'''facebook/bart-base''': BartForConditionalGeneration}
__UpperCAmelCase : Union[str, Any] = {'''facebook/bart-base''': BartTokenizer}
def a ( ):
"""simple docstring"""
UpperCamelCase : List[Any] = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=lowercase_ , default=lowercase_ , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=lowercase_ , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=lowercase_ , default=lowercase_ , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=lowercase_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowercase_ , )
parser.add_argument(
'''--config_name''' , type=lowercase_ , default=lowercase_ , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=lowercase_ , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=lowercase_ , default=lowercase_ , help='''Where to store the final ONNX file.''' )
UpperCamelCase : Dict = parser.parse_args()
return args
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any]="cpu" ):
"""simple docstring"""
UpperCamelCase : Optional[int] = model_dict[model_name].from_pretrained(lowercase_ ).to(lowercase_ )
UpperCamelCase : Any = tokenizer_dict[model_name].from_pretrained(lowercase_ )
if model_name in ["facebook/bart-base"]:
UpperCamelCase : List[Any] = 0
UpperCamelCase : List[str] = None
UpperCamelCase : int = 0
return huggingface_model, tokenizer
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
model.eval()
UpperCamelCase : str = None
UpperCamelCase : Optional[Any] = torch.jit.script(BARTBeamSearchGenerator(lowercase_ ) )
with torch.no_grad():
UpperCamelCase : int = '''My friends are cool but they eat too many carbs.'''
UpperCamelCase : Dict = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors='''pt''' ).to(model.device )
UpperCamelCase : int = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=lowercase_ , max_length=lowercase_ , early_stopping=lowercase_ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowercase_ , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowercase_ , opset_version=1_4 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=lowercase_ , )
logger.info('''Model exported to {}'''.format(lowercase_ ) )
UpperCamelCase : Tuple = remove_dup_initializers(os.path.abspath(lowercase_ ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(lowercase_ ) )
UpperCamelCase : int = onnxruntime.InferenceSession(lowercase_ )
UpperCamelCase : str = ort_sess.run(
lowercase_ , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(lowercase_ ),
'''max_length''': np.array(lowercase_ ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def a ( ):
"""simple docstring"""
UpperCamelCase : List[Any] = parse_args()
UpperCamelCase : Optional[int] = 5
UpperCamelCase : int = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
UpperCamelCase : Dict = torch.device(args.device )
UpperCamelCase , UpperCamelCase : Any = load_model_tokenizer(args.model_name_or_path , lowercase_ )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(lowercase_ )
if args.max_length:
UpperCamelCase : Union[str, Any] = args.max_length
if args.num_beams:
UpperCamelCase : Optional[Any] = args.num_beams
if args.output_file_path:
UpperCamelCase : Any = args.output_file_path
else:
UpperCamelCase : Dict = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
| 357
|
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
return "".join(chr(ord(SCREAMING_SNAKE_CASE_ ) - 3_2 ) if '''a''' <= char <= '''z''' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 315
| 0
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__UpperCAmelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__UpperCAmelCase : Union[str, Any] = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
UpperCamelCase : Optional[int] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase : Optional[int] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
UpperCamelCase : Any = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
UpperCamelCase : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
UpperCamelCase : Optional[int] = black.format_str(lowercase_ , mode=lowercase_ )
UpperCamelCase : Optional[Any] = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(lowercase_ , '''w''' , newline='''\n''' ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , '''r''' ) as f:
self.assertTrue(f.read() , lowercase_ )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(lowercase_ , lowercase_ )
def _lowercase ( self ):
"""simple docstring"""
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowercase_ ) , )
# Copy consistency with a really long name
UpperCamelCase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowercase_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowercase_ ) , )
| 358
|
import math
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCamelCase : Union[str, Any] = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=1 , **SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
UpperCamelCase : Tuple = factor * value
UpperCamelCase : Optional[int] = value
while not is_prime(SCREAMING_SNAKE_CASE_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE_ )
return value
| 315
| 0
|
from __future__ import annotations
__UpperCAmelCase : Dict = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__UpperCAmelCase : Union[str, Any] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
UpperCamelCase : int = []
UpperCamelCase : Dict = len(_lowercase )
for i in range(_lowercase ):
UpperCamelCase : Tuple = -1
for j in range(i + 1 , _lowercase ):
if arr[i] < arr[j]:
UpperCamelCase : Dict = arr[j]
break
result.append(_lowercase )
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
UpperCamelCase : List[Any] = []
for i, outer in enumerate(_lowercase ):
UpperCamelCase : Union[str, Any] = -1
for inner in arr[i + 1 :]:
if outer < inner:
UpperCamelCase : Dict = inner
break
result.append(_lowercase )
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase : str = len(_lowercase )
UpperCamelCase : List[Any] = []
UpperCamelCase : Union[str, Any] = [-1] * arr_size
for index in reversed(range(_lowercase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
UpperCamelCase : List[Any] = stack[-1]
stack.append(arr[index] )
return result
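# Illustrative trace of the monotonic-stack version for arr = [4, 5, 2, 25],
# scanning right to left (each element is pushed and popped at most once,
# giving O(n) overall versus O(n^2) for the two nested-loop variants above):
#   index 3 (25): stack empty    -> result[3] = -1, stack = [25]
#   index 2 (2):  top 25 > 2     -> result[2] = 25, stack = [25, 2]
#   index 1 (5):  pop 2; 25 > 5  -> result[1] = 25, stack = [25, 5]
#   index 0 (4):  top 5 > 4      -> result[0] = 5,  stack = [25, 5, 4]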
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__UpperCAmelCase : List[str] = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 359
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , FutureWarning , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 315
| 0
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
__UpperCAmelCase : str = "base_with_context"
def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
UpperCamelCase : Any = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=SCREAMING_SNAKE_CASE_ )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCamelCase : List[Any] = weights[F"""layers_{lyr_num}"""]
UpperCamelCase : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase : Optional[Any] = ly_weight["""attention"""]
UpperCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCamelCase : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCamelCase : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
UpperCamelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=SCREAMING_SNAKE_CASE_ )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCamelCase : int = weights[F"""layers_{lyr_num}"""]
UpperCamelCase : List[str] = ly_weight["""attention"""]
UpperCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase : str = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
UpperCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
UpperCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
UpperCamelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
UpperCamelCase : Dict = weights[F"""layers_{lyr_num}"""]
UpperCamelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
UpperCamelCase : Union[str, Any] = ly_weight["""self_attention"""]
UpperCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase : Union[str, Any] = ly_weight["""MultiHeadDotProductAttention_0"""]
UpperCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCamelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCamelCase : str = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase : Dict = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
UpperCamelCase : int = jnp.tree_util.tree_map(onp.array , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = [
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
UpperCamelCase : str = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
UpperCamelCase : Tuple = inference.parse_training_gin_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = inference.InferenceModel(args.checkpoint_path , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
UpperCamelCase : List[Any] = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
UpperCamelCase : Tuple = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
UpperCamelCase : Union[str, Any] = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
UpperCamelCase : Dict = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = load_decoder(ta_checkpoint['''target''']['''decoder'''] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
UpperCamelCase : Tuple = SpectrogramDiffusionPipeline(
notes_encoder=SCREAMING_SNAKE_CASE_ , continuous_encoder=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , melgan=SCREAMING_SNAKE_CASE_ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
__UpperCAmelCase : Dict = parser.parse_args()
main(args)
| 360
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv3ImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ):
"""simple docstring"""
UpperCamelCase : List[str] = size if size is not None else {'''height''': 18, '''width''': 18}
UpperCamelCase : int = parent
UpperCamelCase : List[Any] = batch_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Union[str, Any] = min_resolution
UpperCamelCase : Tuple = max_resolution
UpperCamelCase : List[str] = do_resize
UpperCamelCase : List[str] = size
UpperCamelCase : int = apply_ocr
def _lowercase ( self ):
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase_ ( _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : List[str] = LayoutLMv3ImageProcessor if is_pytesseract_available() else None
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = LayoutLMv3ImageProcessingTester(self )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE )
# Test batched
UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCamelCase : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCamelCase : Optional[int] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = LayoutLMv3ImageProcessor()
from datasets import load_dataset
UpperCamelCase : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
UpperCamelCase : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE )
# with apply_OCR = False
UpperCamelCase : Optional[Any] = LayoutLMv3ImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 315
| 0
|
def a ( ):
"""simple docstring"""
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
__UpperCAmelCase : Union[str, Any] = generate_large_matrix()
__UpperCAmelCase : str = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def a ( SCREAMING_SNAKE_CASE_ : list[list[int]] ):
"""simple docstring"""
assert all(row == sorted(_a , reverse=_a ) for row in grid )
assert all(list(_a ) == sorted(_a , reverse=_a ) for col in zip(*_a ) )
def a ( SCREAMING_SNAKE_CASE_ : list[int] ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = 0
UpperCamelCase : Tuple = len(_a ) - 1
# Edge cases: no values, or all numbers negative (rows are sorted in decreasing order).
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCamelCase : str = (left + right) // 2
UpperCamelCase : Optional[Any] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCamelCase : List[Any] = mid + 1
else:
UpperCamelCase : int = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_a )
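# Illustrative behaviour of the binary search above:
#   find_negative_index([4, 3, 2, -1]) -> 3 (first negative value sits at index 3)
#   find_negative_index([1, 2, 3])     -> 3 (no negatives, so the length is returned)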
def a ( SCREAMING_SNAKE_CASE_ : list[list[int]] ):
"""simple docstring"""
UpperCamelCase : Tuple = 0
UpperCamelCase : Tuple = len(grid[0] )
for i in range(len(_a ) ):
UpperCamelCase : Dict = find_negative_index(grid[i][:bound] )
total += bound
return (len(_a ) * len(grid[0] )) - total
def a ( SCREAMING_SNAKE_CASE_ : list[list[int]] ):
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def a ( SCREAMING_SNAKE_CASE_ : list[list[int]] ):
"""simple docstring"""
UpperCamelCase : int = 0
for row in grid:
for i, number in enumerate(_a ):
if number < 0:
total += len(_a ) - i
break
return total
def a ( ):
"""simple docstring"""
from timeit import timeit
print('''Running benchmarks''' )
UpperCamelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCamelCase : Dict = timeit(F"""{func}(grid=grid)""" , setup=_a , number=5_0_0 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 361
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a ( SCREAMING_SNAKE_CASE_ : dict ):
"""simple docstring"""
return (data["data"], data["target"])
def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = XGBRegressor(verbosity=0 , random_state=4_2 )
xgb.fit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Predict target for test data
UpperCamelCase : Any = xgb.predict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = predictions.reshape(len(SCREAMING_SNAKE_CASE_ ) , 1 )
return predictions
def a ( ):
"""simple docstring"""
UpperCamelCase : Tuple = fetch_california_housing()
UpperCamelCase , UpperCamelCase : Tuple = data_handling(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = train_test_split(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , test_size=0.25 , random_state=1 )
UpperCamelCase : Optional[Any] = xgboost(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Error printing
print(F"""Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
print(F"""Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
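# Illustrative note: sklearn's mean_squared_error returns the squared error by
# default; in scikit-learn >= 0.22 you can pass squared=False to get the RMSE,
# which is in the same units as the target:
#   mean_squared_error(y_test, predictions, squared=False)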
| 315
| 0
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class UpperCAmelCase_ :
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase : List[str] = T5EncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : Union[str, Any] = UNet2DConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , thresholding=snake_case__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCamelCase : List[str] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = T5EncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : List[str] = UNet2DConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , thresholding=snake_case__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCamelCase : Tuple = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCamelCase : List[str] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.get_dummy_components()
UpperCamelCase : List[str] = self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(snake_case__ )
UpperCamelCase : Union[str, Any] = inputs['''prompt''']
UpperCamelCase : Any = inputs['''generator''']
UpperCamelCase : Optional[int] = inputs['''num_inference_steps''']
UpperCamelCase : Tuple = inputs['''output_type''']
if "image" in inputs:
UpperCamelCase : Union[str, Any] = inputs['''image''']
else:
UpperCamelCase : Tuple = None
if "mask_image" in inputs:
UpperCamelCase : Union[str, Any] = inputs['''mask_image''']
else:
UpperCamelCase : List[str] = None
if "original_image" in inputs:
UpperCamelCase : Optional[Any] = inputs['''original_image''']
else:
UpperCamelCase : Optional[int] = None
UpperCamelCase : List[Any] = pipe.encode_prompt(snake_case__ )
# inputs with prompt converted to embeddings
UpperCamelCase : List[Any] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase : Any = image
if mask_image is not None:
UpperCamelCase : Dict = mask_image
if original_image is not None:
UpperCamelCase : Optional[int] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(snake_case__ , snake_case__ , snake_case__ )
UpperCamelCase : Tuple = pipe(**snake_case__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case__ )
UpperCamelCase : Tuple = self.pipeline_class.from_pretrained(snake_case__ )
pipe_loaded.to(snake_case__ )
pipe_loaded.set_progress_bar_config(disable=snake_case__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(snake_case__ , snake_case__ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCamelCase : Optional[int] = self.get_dummy_inputs(snake_case__ )
UpperCamelCase : List[Any] = inputs['''generator''']
UpperCamelCase : int = inputs['''num_inference_steps''']
UpperCamelCase : Any = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCamelCase : Union[str, Any] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase : List[Any] = image
if mask_image is not None:
UpperCamelCase : Union[str, Any] = mask_image
if original_image is not None:
UpperCamelCase : List[Any] = original_image
UpperCamelCase : Tuple = pipe_loaded(**snake_case__ )[0]
UpperCamelCase : Dict = np.abs(to_np(snake_case__ ) - to_np(snake_case__ ) ).max()
self.assertLess(snake_case__ , 1e-4 )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.get_dummy_components()
UpperCamelCase : Dict = self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCamelCase : Tuple = self.get_dummy_inputs(snake_case__ )
UpperCamelCase : str = pipe(**snake_case__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case__ )
UpperCamelCase : Optional[Any] = self.pipeline_class.from_pretrained(snake_case__ )
pipe_loaded.to(snake_case__ )
pipe_loaded.set_progress_bar_config(disable=snake_case__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCamelCase : List[Any] = self.get_dummy_inputs(snake_case__ )
UpperCamelCase : Optional[Any] = pipe_loaded(**snake_case__ )[0]
UpperCamelCase : str = np.abs(to_np(snake_case__ ) - to_np(snake_case__ ) ).max()
self.assertLess(snake_case__ , 1e-4 )
| 362
|
__UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase : Union[str, Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 315
| 0
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__UpperCAmelCase : str = 3
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
print('''Generating primitive root of p''' )
while True:
UpperCamelCase : List[Any] = random.randrange(3 , _lowercase )
if pow(_lowercase , 2 , _lowercase ) == 1:
continue
if pow(_lowercase , _lowercase , _lowercase ) == 1:
continue
return g
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
print('''Generating prime p...''' )
UpperCamelCase : int = rabin_miller.generate_large_prime(_lowercase ) # select large prime number.
UpperCamelCase : str = primitive_root(_lowercase ) # one primitive root on modulo p.
UpperCamelCase : Dict = random.randrange(3 , _lowercase ) # private_key -> have to be greater than 2 for safety.
UpperCamelCase : Optional[Any] = cryptomath.find_mod_inverse(pow(_lowercase , _lowercase , _lowercase ) , _lowercase )
UpperCamelCase : Optional[Any] = (key_size, e_a, e_a, p)
UpperCamelCase : int = (key_size, d)
return public_key, private_key
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('''\nWARNING:''' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
UpperCamelCase : Optional[Any] = generate_key(_lowercase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , '''w''' ) as fo:
fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , '''w''' ) as fo:
fo.write(F"""{private_key[0]},{private_key[1]}""" )
def a ( ):
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_0_4_8 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
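# A minimal, self-contained sketch of the ElGamal arithmetic the keys above
# support (illustrative only -- toy parameters, not the 2048-bit keys written
# to disk by make_key_files):
def _elgamal_demo():
    p, g = 467, 2  # small prime and generator, for demonstration only
    d = 153  # private key
    h = pow(g, d, p)  # public component: h = g^d mod p
    m, k = 331, 197  # message and ephemeral key
    c1, c2 = pow(g, k, p), (m * pow(h, k, p)) % p  # encryption
    # decryption: m = c2 * c1^(-d) mod p, using Fermat's little theorem for the inverse
    assert (c2 * pow(c1, p - 1 - d, p)) % p == m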
| 363
|
import collections
import os
import re
from pathlib import Path
__UpperCAmelCase : List[str] = "src/transformers"
# Matches is_xxx_available()
__UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__UpperCAmelCase : Any = re.compile(r"^\s*try:")
# Catches a line with else:
__UpperCAmelCase : List[Any] = re.compile(r"^\s*else:")
def a ( SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None:
return None
UpperCamelCase : Union[str, Any] = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCamelCase : Tuple = f.readlines()
UpperCamelCase : Tuple = 0
while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCamelCase : List[Any] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
UpperCamelCase : Optional[int] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[int] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0]
UpperCamelCase : str = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
UpperCamelCase : List[Any] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
UpperCamelCase : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
UpperCamelCase : Dict = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCamelCase : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
UpperCamelCase : str = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None:
UpperCamelCase : Union[str, Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
UpperCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None:
UpperCamelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
UpperCamelCase : Dict = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 1_2 + '''"''' ):
objects.append(line[1_3:-3] )
line_index += 1
UpperCamelCase : Tuple = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCamelCase : int = []
while (
line_index < len(SCREAMING_SNAKE_CASE_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
UpperCamelCase : Tuple = lines[line_index]
UpperCamelCase : Any = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCamelCase : Any = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE_ ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCamelCase : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
UpperCamelCase : Optional[Any] = lines[line_index]
UpperCamelCase : str = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
UpperCamelCase : str = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
def find_duplicates(SCREAMING_SNAKE_CASE_ : Any ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCamelCase : Dict = []
for key in import_dict_objects.keys():
UpperCamelCase : Union[str, Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
UpperCamelCase : Dict = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCamelCase : List[str] = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def a ( ):
"""simple docstring"""
UpperCamelCase : Any = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ):
if "__init__.py" in files:
UpperCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' )
UpperCamelCase : Optional[int] = parse_init(SCREAMING_SNAKE_CASE_ )
if objects is not None:
UpperCamelCase : str = analyze_results(*SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def a ( ):
"""simple docstring"""
UpperCamelCase : Dict = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(SCREAMING_SNAKE_CASE_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob('''*.py''' ) ) ) == 0:
continue
UpperCamelCase : List[str] = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : str = short_path.replace(os.path.sep , '''.''' )
submodules.append(SCREAMING_SNAKE_CASE_ )
for fname in files:
if fname == "__init__.py":
continue
UpperCamelCase : Tuple = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : int = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE_ )
return submodules
__UpperCAmelCase : Optional[int] = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def a ( ):
"""simple docstring"""
from transformers.utils import direct_transformers_import
UpperCamelCase : Tuple = direct_transformers_import(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to pick up all additions and
# (potentially re-)add them.
with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) , '''r''' ) as f:
UpperCamelCase : List[Any] = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , SCREAMING_SNAKE_CASE_ ) ) )
UpperCamelCase : Union[str, Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase : str = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 315
| 0
|
from math import log
from scipy.constants import Boltzmann, physical_constants
__UpperCAmelCase : Tuple = 300 # TEMPERATURE (unit = K)
def a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ):
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
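# Illustrative usage (assuming the function above is named builtin_voltage;
# concentrations in cm^-3 are only for demonstration -- the formula
# V = (k*T / q) * ln(N_d * N_a / n_i^2) depends only on the concentration ratio):
#   builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
#   -> roughly 0.81 V at T = 300 K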
| 364
|
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : Any = set()
# Replace all the whitespace in our sentence
UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 2_6
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : str = [False] * 2_6
for char in input_str:
if char.islower():
UpperCamelCase : List[Any] = True
elif char.isupper():
UpperCamelCase : List[Any] = True
return all(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
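# Illustrative note: the "fastest" variant above does all its work in a single
# set comprehension over the lowercased string, avoiding the per-character
# Python-level branching of the other two versions -- which matches the
# benchmark timings recorded below.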
def a ( ):
"""simple docstring"""
from timeit import timeit
UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 315
| 0
|
__UpperCAmelCase : str = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 365
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase : Union[str, Any] = logging.getLogger()
def a ( ):
"""simple docstring"""
UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase : List[str] = parser.parse_args()
return args.f
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = logging.StreamHandler(sys.stdout )
logger.addHandler(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Dict = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , '''run_glue_deebert.py''' )
with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : int = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(__SCREAMING_SNAKE_CASE , 0.666 )
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
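

# Usage sketch (assumed, not part of the original file): the config can be
# instantiated standalone; quant_mode toggles integer-only quantization.
def _demo_ibert_config() -> None:
    config = IBertConfig(quant_mode=True)
    assert config.model_type == "ibert"
    assert config.hidden_size == 768 and config.quant_mode is True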
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition a[left_index:right_index] around a[left_index]; return the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Quicksort with a randomly chosen pivot; sorts a[left:right] in place."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]

    quick_sort_random(arr, 0, len(arr))

    print(arr)
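

# Usage sketch (assumed, not in the original): sorts in place over the
# half-open range [left, right).
def _demo_quick_sort() -> None:
    data = [5, 2, 9, 1, 7]
    quick_sort_random(data, 0, len(data))
    assert data == [1, 2, 5, 7, 9]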
if __name__ == "__main__":
main()
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
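

# Usage sketch (assumed, not part of the original file): extract text nodes
# and their xpaths from raw HTML; requires bs4 but no model weights.
def _demo_markup_extraction() -> None:
    extractor = MarkupLMFeatureExtractor()
    encoding = extractor("<html><body><p>Hello world</p></body></html>")
    assert encoding["nodes"] == [["Hello world"]]
    assert encoding["xpaths"] == [["/html/body/p"]]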
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
def remove_digit(num: int) -> int:
    """Return the largest number that can be made by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
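

# Usage sketch (assumed, not in the original): removing one digit from 152 can
# yield 15, 12, or 52, of which 52 is the largest; signs are ignored via abs().
def _demo_remove_digit() -> None:
    assert remove_digit(152) == 52
    assert remove_digit(-290) == 90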
if __name__ == "__main__":
__import__("doctest").testmod()
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
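

# Parsing sketch (assumed, not part of the original file): `token2json` turns a
# Donut-style token sequence into nested JSON; `added_vocab` is passed
# explicitly here so the demo does not need a loaded tokenizer.
def _demo_token2json(processor: DonutProcessor) -> None:
    parsed = processor.token2json("<s_menu><s_name>Latte</s_name></s_menu>", added_vocab=[])
    assert parsed == {"menu": {"name": "Latte"}}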
"""simple docstring"""
import math
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : Any = []
UpperCamelCase : str = 2
UpperCamelCase : Optional[int] = int(math.sqrt(snake_case_ ) ) # Size of every segment
UpperCamelCase : Optional[Any] = [True] * (end + 1)
UpperCamelCase : List[Any] = []
while start <= end:
if temp[start] is True:
in_prime.append(snake_case_ )
for i in range(start * start , end + 1 , snake_case_ ):
UpperCamelCase : Any = False
start += 1
prime += in_prime
UpperCamelCase : str = end + 1
UpperCamelCase : Optional[int] = min(2 * end , snake_case_ )
while low <= n:
UpperCamelCase : int = [True] * (high - low + 1)
for each in in_prime:
UpperCamelCase : Union[str, Any] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(snake_case_ , high + 1 , snake_case_ ):
UpperCamelCase : Tuple = False
for j in range(len(snake_case_ ) ):
if temp[j] is True:
prime.append(j + low )
UpperCamelCase : Optional[int] = high + 1
UpperCamelCase : int = min(high + end , snake_case_ )
return prime
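

# Sanity sketch (assumed, not in the original): the segmented sieve agrees
# with the primes below 30.
def _demo_sieve() -> None:
    assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]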
print(sieve(10**6))
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
def solution(length: int = 50) -> int:
    """Count the fillings of a row of `length` units with blocks of minimum
    length 3, any two blocks separated by at least one empty square
    (Project Euler 114)."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
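

# Sanity sketch (assumed, not in the original): the Project Euler 114 problem
# statement gives exactly 17 arrangements for a row of length 7.
def _demo_solution() -> None:
    assert solution(7) == 17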
if __name__ == "__main__":
print(f'''{solution() = }''')
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order, with multiplicity."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
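

# Usage sketch (assumed, not in the original): repeated factors appear once
# per power; primes return themselves.
def _demo_prime_factors() -> None:
    assert prime_factors(100) == [2, 2, 5, 5]
    assert prime_factors(97) == [97]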
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
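

# Construction sketch (assumed; every argument here is a placeholder for an
# object built elsewhere in a typical question-answering example script):
def _build_qa_trainer(model, training_args, train_dataset, eval_dataset, eval_examples, post_process_function, compute_metrics):
    return QuestionAnsweringTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        eval_examples=eval_examples,
        post_process_function=post_process_function,
        compute_metrics=compute_metrics,
    )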